git.hungrycats.org Git - linux/commitdiff
v2.5.0.11 -> v2.5.1 v2.5.1
author Linus Torvalds <torvalds@athlon.transmeta.com>
Tue, 5 Feb 2002 07:59:17 +0000 (23:59 -0800)
committer Linus Torvalds <torvalds@athlon.transmeta.com>
Tue, 5 Feb 2002 07:59:17 +0000 (23:59 -0800)
- Al Viro: floppy_eject cleanup, mount cleanups
- Jens Axboe: bio updates
- Ingo Molnar: mempool fixes
- GOTO Masanori: Fix O_DIRECT error handling

174 files changed:
Makefile
arch/m68k/kernel/setup.c
arch/m68k/q40/config.c
arch/ppc/kernel/apus_setup.c
arch/sparc64/kernel/iommu_common.c
arch/sparc64/kernel/iommu_common.h
drivers/acorn/block/fd1772.c
drivers/block/DAC960.c
drivers/block/acsi.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/block_ioctl.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/block/floppy.c
drivers/block/ll_rw_blk.c
drivers/block/nbd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/ps2esdi.c
drivers/block/swim3.c
drivers/block/swim_iop.c
drivers/block/xd.c
drivers/block/z2ram.c
drivers/fc4/fc.c
drivers/fc4/soc.c
drivers/fc4/soc.h
drivers/fc4/socal.c
drivers/fc4/socal.h
drivers/ide/hd.c
drivers/ide/ide-dma.c
drivers/ide/ide-probe.c
drivers/ide/ide.c
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptscsih.c
drivers/message/i2o/i2o_block.c
drivers/scsi/Makefile
drivers/scsi/README.ncr53c8xx
drivers/scsi/aic7xxx_old.c
drivers/scsi/esp.c
drivers/scsi/esp.h
drivers/scsi/hosts.c
drivers/scsi/hosts.h
drivers/scsi/ncr53c8xx.c
drivers/scsi/ncr53c8xx.h
drivers/scsi/qlogicfc.c
drivers/scsi/qlogicisp.c
drivers/scsi/qlogicpti.c
drivers/scsi/qlogicpti.h
drivers/scsi/scsi.c
drivers/scsi/scsi.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_dma.c [deleted file]
drivers/scsi/scsi_ioctl.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_merge.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_syms.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/sr_ioctl.c
drivers/scsi/sr_vendor.c
drivers/scsi/sym53c8xx.c
drivers/scsi/sym53c8xx.h
drivers/scsi/sym53c8xx_2/ChangeLog.txt
drivers/scsi/sym53c8xx_2/sym53c8xx.h
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/scsi/sym53c8xx_2/sym_glue.h
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/scsi/sym53c8xx_2/sym_hipd.h
drivers/scsi/sym53c8xx_comm.h
drivers/scsi/sym53c8xx_defs.h
fs/adfs/adfs.h
fs/adfs/dir_f.c
fs/adfs/dir_fplus.c
fs/adfs/inode.c
fs/adfs/super.c
fs/affs/file.c
fs/affs/super.c
fs/bfs/dir.c
fs/bfs/file.c
fs/bfs/inode.c
fs/bio.c
fs/cramfs/inode.c
fs/efs/dir.c
fs/efs/file.c
fs/efs/inode.c
fs/efs/namei.c
fs/efs/super.c
fs/efs/symlink.c
fs/ext2/balloc.c
fs/ext2/ialloc.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext3/balloc.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext3/super.c
fs/fat/buffer.c
fs/fat/inode.c
fs/freevxfs/vxfs_bmap.c
fs/freevxfs/vxfs_inode.c
fs/freevxfs/vxfs_subr.c
fs/hfs/file.c
fs/hfs/hfs.h
fs/hfs/super.c
fs/hfs/sysdep.c
fs/hpfs/buffer.c
fs/hpfs/file.c
fs/hpfs/hpfs_fn.h
fs/isofs/dir.c
fs/isofs/inode.c
fs/isofs/namei.c
fs/isofs/rock.c
fs/minix/bitmap.c
fs/minix/inode.c
fs/minix/itree_common.c
fs/namespace.c
fs/ncpfs/ncplib_kernel.c
fs/ntfs/fs.c
fs/ntfs/support.c
fs/qnx4/bitmap.c
fs/qnx4/dir.c
fs/qnx4/fsync.c
fs/qnx4/inode.c
fs/reiserfs/fix_node.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/resize.c
fs/reiserfs/stree.c
fs/romfs/inode.c
fs/sysv/balloc.c
fs/sysv/ialloc.c
fs/sysv/itree.c
fs/sysv/super.c
fs/udf/balloc.c
fs/udf/dir.c
fs/udf/directory.c
fs/udf/file.c
fs/udf/inode.c
fs/udf/misc.c
fs/udf/namei.c
fs/udf/partition.c
fs/udf/super.c
fs/udf/symlink.c
fs/udf/udfdecl.h
fs/ufs/balloc.c
fs/ufs/cylinder.c
fs/ufs/dir.c
fs/ufs/inode.c
fs/ufs/super.c
fs/ufs/truncate.c
fs/ufs/util.c
fs/ufs/util.h
include/asm-i386/io.h
include/asm-m68k/machdep.h
include/asm-sparc64/dma.h
include/asm-sparc64/io.h
include/linux/amigaffs.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/blkdev.h.orig [new file with mode: 0644]
include/linux/fd.h
include/linux/fs.h
include/linux/highmem.h
include/linux/iso_fs.h
include/linux/qnx4_fs.h
init/do_mounts.c
kernel/ksyms.c
kernel/signal.c
mm/filemap.c
mm/highmem.c
mm/mempool.c

index a62e69d0f39c975d7db7f60014d1b74f76ced0f3..a5aafb72c08018773c8d808532a285900ae5749e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 5
 SUBLEVEL = 1
-EXTRAVERSION =-pre11
+EXTRAVERSION =
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
index 86007efb1d2883d2464e35f24c5f568b65949dfe..57a954d7e01949d9874dd5fe0d1ce66f16b469fc 100644 (file)
@@ -93,7 +93,6 @@ void (*mach_power_off)( void ) = NULL;
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
 #if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY) 
 void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-void (*mach_floppy_eject) (void) = NULL;
 #endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int) = NULL;
@@ -514,11 +513,6 @@ void __init floppy_setup(char *str, int *ints)
                mach_floppy_setup (str, ints);
 }
 
-void floppy_eject(void)
-{
-       if (mach_floppy_eject)
-               mach_floppy_eject();
-}
 #endif
 
 /* for "kbd-reset" cmdline param */
index 23690901ce9b4866d27ac2f144085d8f7704b409..4989e67bc19df7b1cc3ead8c8d735018f3be9464 100644 (file)
@@ -36,7 +36,6 @@
 #include <asm/q40_master.h>
 #include <asm/keyboard.h>
 
-extern void floppy_eject(void);
 extern void floppy_setup(char *str, int *ints);
 
 extern int q40kbd_translate(unsigned char scancode, unsigned char *keycode,
index bdbc452bc0e383a80d8181cb6303657fcf89fe74..c3fe77cde972f886ff43c48df5f9eaec79656b52 100644 (file)
@@ -106,7 +106,6 @@ void (*mach_reset)( void );
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
 #if defined(CONFIG_AMIGA_FLOPPY)
 void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-void (*mach_floppy_eject) (void) = NULL;
 #endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int) = NULL;
@@ -404,12 +403,6 @@ void floppy_setup(char *str, int *ints)
        if (mach_floppy_setup)
                mach_floppy_setup (str, ints);
 }
-
-void floppy_eject(void)
-{
-       if (mach_floppy_eject)
-               mach_floppy_eject();
-}
 #endif
 
 /*********************************************************** MEMORY */
index e9d4bea7cc72233c55995db237becc7cf928c3eb..134be5cd793cfad601ff02ce9e559837aa55e073 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: iommu_common.c,v 1.6 2001/10/09 02:24:33 davem Exp $
+/* $Id: iommu_common.c,v 1.8 2001/12/11 11:13:06 davem Exp $
  * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
  *
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -66,7 +66,9 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
 
        daddr = dma_sg->dma_address;
        sglen = sg->length;
-       sgaddr = (unsigned long) sg->address;
+       sgaddr = (unsigned long) (sg->address ?
+                                 sg->address :
+                                 page_address(sg->page) + sg->offset);
        while (dlen > 0) {
                unsigned long paddr;
 
@@ -116,7 +118,9 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
                sg++;
                if (--nents <= 0)
                        break;
-               sgaddr = (unsigned long) sg->address;
+               sgaddr = (unsigned long) (sg->address ?
+                                         sg->address :
+                                         page_address(sg->page) + sg->offset);
                sglen = sg->length;
        }
        if (dlen < 0) {
@@ -197,14 +201,21 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
        unsigned long prev;
        u32 dent_addr, dent_len;
 
-       prev  = (unsigned long) sg->address;
+       prev  = (unsigned long) (sg->address ?
+                                sg->address :
+                                page_address(sg->page) + sg->offset);
        prev += (unsigned long) (dent_len = sg->length);
-       dent_addr = (u32) ((unsigned long)sg->address & (IO_PAGE_SIZE - 1UL));
+       dent_addr = (u32) ((unsigned long)(sg->address ?
+                                          sg->address :
+                                          page_address(sg->page) + sg->offset)
+                          & (IO_PAGE_SIZE - 1UL));
        while (--nents) {
                unsigned long addr;
 
                sg++;
-               addr = (unsigned long) sg->address;
+               addr = (unsigned long) (sg->address ?
+                                       sg->address :
+                                       page_address(sg->page) + sg->offset);
                if (! VCONTIG(prev, addr)) {
                        dma_sg->dma_address = dent_addr;
                        dma_sg->dma_length = dent_len;
index 4d0795ad4eff9043341ae80b6de39d7f0c5ad0c8..039744070ff675cb286f0c459693df46a1508e2d 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: iommu_common.h,v 1.4 2001/10/09 02:24:33 davem Exp $
+/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
  * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
  *
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -6,8 +6,9 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
 
-#include <asm/page.h>
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
 
index d9cf8b505ce68b227a75510b2d9b4866691906fd..1667797e2662d8f9b46fa4d97ae4c9808d48ca35 100644 (file)
@@ -1620,7 +1620,3 @@ int fd1772_init(void)
 
        return 0;
 }
-
-void floppy_eject(void)
-{
-}
index 55d3fbe7e7d47c372ee1160e8c8ca3d88e768ada..0f0060dbb6a36183c946643408844dfb02d2f5c1 100644 (file)
@@ -1948,8 +1948,11 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
   RequestQueue = BLK_DEFAULT_QUEUE(MajorNumber);
   blk_init_queue(RequestQueue, DAC960_RequestFunction);
   RequestQueue->queuedata = Controller;
-  RequestQueue->max_segments = Controller->DriverScatterGatherLimit;
-  RequestQueue->max_sectors = Controller->MaxBlocksPerCommand;
+  blk_queue_max_hw_segments(RequestQueue,
+                           Controller->DriverScatterGatherLimit);
+  blk_queue_max_phys_segments(RequestQueue, ~0);
+  blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+
   Controller->RequestQueue = RequestQueue;
   /*
     Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
@@ -2889,7 +2892,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
   Command->LogicalDriveNumber = DAC960_LogicalDriveNumber(Request->rq_dev);
   Command->BlockNumber = Request->sector;
   Command->BlockCount = Request->nr_sectors;
-  Command->SegmentCount = Request->nr_segments;
+  Command->SegmentCount = Request->nr_phys_segments;
   Command->BufferHeader = Request->bio;
   Command->RequestBuffer = Request->buffer;
   blkdev_dequeue_request(Request);
index 916a192e5e5ef3fde01b0817228da6a5f7a12cc5..28e5ae8e04f5308277853774dfa4a6511685607a 100644 (file)
@@ -253,6 +253,8 @@ static int                          CurrentNReq;
 static int                             CurrentNSect;
 static char                            *CurrentBuffer;
 
+static spinlock_t                      acsi_lock = SPIN_LOCK_UNLOCKED;
+
 
 #define SET_TIMER()    mod_timer(&acsi_timer, jiffies + ACSI_TIMEOUT)
 #define CLEAR_TIMER()  del_timer(&acsi_timer)
@@ -1784,7 +1786,7 @@ int acsi_init( void )
        phys_acsi_buffer = virt_to_phys( acsi_buffer );
        STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
        
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &acsi_lock);
        read_ahead[MAJOR_NR] = 8;               /* 8 sector (4kB) read-ahead */
        add_gendisk(&acsi_gendisk);
 
index 45d30ce457d526def04e66f64feeb339b8aa069e..c2e353b88d9e255f7ff2890b55ab99c99207bce7 100644 (file)
@@ -174,6 +174,8 @@ static int writepending;
 static int writefromint;
 static char *raw_buf;
 
+static spinlock_t amiflop_lock = SPIN_LOCK_UNLOCKED;
+
 #define RAW_BUF_SIZE 30000  /* size of raw disk data */
 
 /*
@@ -1855,7 +1857,7 @@ int __init amiga_floppy_init(void)
        post_write_timer.data = 0;
        post_write_timer.function = post_write;
   
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &amiflop_lock);
        blksize_size[MAJOR_NR] = floppy_blocksizes;
        blk_size[MAJOR_NR] = floppy_sizes;
 
index eea26ff9e9d021eee55500e885085acd20d3973e..1386f3eba58de124b89f2bd6bd5ad3ed423e67f7 100644 (file)
@@ -156,6 +156,8 @@ static int StartDiskType[] = {
 
 static int DriveType = TYPE_HD;
 
+static spinlock_t ataflop_lock = SPIN_LOCK_UNLOCKED;
+
 /* Array for translating minors into disk formats */
 static struct {
        int      index;
@@ -2013,7 +2015,7 @@ int __init atari_floppy_init (void)
 
        blk_size[MAJOR_NR] = floppy_sizes;
        blksize_size[MAJOR_NR] = floppy_blocksizes;
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &ataflop_lock);
 
        printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
               DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
index a894888483c979ca0eb1a9e030afab95b29c2651..75d71ca05c98606e386894a19a9e74343701d8ad 100644 (file)
@@ -76,5 +76,8 @@ int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg)
                        err = -ENOTTY;
        }
 
+#if 0
+       blk_put_queue(q);
+#endif
        return err;
 }
index 74aca53678130f736e5d65ab4fa114bdff59311d..b038dde8b39d4480db463df4a24619376d238440 100644 (file)
@@ -1219,7 +1219,7 @@ queue:
                goto startio;
 
        creq = elv_next_request(q);
-       if (creq->nr_segments > MAXSGENTRIES)
+       if (creq->nr_phys_segments > MAXSGENTRIES)
                 BUG();
 
         if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR )
@@ -1866,9 +1866,16 @@ static int __init cciss_init_one(struct pci_dev *pdev,
 
        q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
         q->queuedata = hba[i];
+       spin_lock_init(&hba[i]->lock);
         blk_init_queue(q, do_cciss_request, &hba[i]->lock);
        blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
-       blk_queue_max_segments(q, MAXSGENTRIES);
+
+       /* This is a hardware imposed limit. */
+       blk_queue_max_hw_segments(q, MAXSGENTRIES);
+
+       /* This is a limit in the driver and could be eliminated. */
+       blk_queue_max_phys_segments(q, MAXSGENTRIES);
+
        blk_queue_max_sectors(q, 512);
 
        /* fill in the other Kernel structs */
index 5f85cb0b5b6b9c8db8b0022e74b437170bfe8a89..5f2298ba9720df5da1319a38cec4304dfb290228 100644 (file)
@@ -467,9 +467,16 @@ int __init cpqarray_init(void)
 
                q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
                q->queuedata = hba[i];
+               spin_lock_init(&hba[i]->lock);
                blk_init_queue(q, do_ida_request, &hba[i]->lock);
                blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
-               blk_queue_max_segments(q, SG_MAX);
+
+               /* This is a hardware imposed limit. */
+               blk_queue_max_hw_segments(q, SG_MAX);
+
+               /* This is a driver limit and could be eliminated. */
+               blk_queue_max_phys_segments(q, SG_MAX);
+
                blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
                read_ahead[MAJOR_NR+i] = READ_AHEAD;
 
@@ -864,7 +871,7 @@ queue_next:
                goto startio;
 
        creq = elv_next_request(q);
-       if (creq->nr_segments > SG_MAX)
+       if (creq->nr_phys_segments > SG_MAX)
                BUG();
 
        if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR || h->ctlr > nr_ctlr)
index 2417023debafe3694a56655c42ed7c0f7d1c826c..2fcdcc59ace7e792d59813afb6ddc09bcb4d3673 100644 (file)
@@ -204,7 +204,7 @@ static int use_virtual_dma;
  * record each buffers capabilities
  */
 
-static spinlock_t floppy_lock;
+static spinlock_t floppy_lock = SPIN_LOCK_UNLOCKED;
 
 static unsigned short virtual_dma_port=0x3f0;
 void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
@@ -4479,21 +4479,4 @@ MODULE_LICENSE("GPL");
 
 __setup ("floppy=", floppy_setup);
 module_init(floppy_init)
-
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-/* This should only be called at boot time when we're sure that there's no
- * resource contention. */
-void floppy_eject(void)
-{
-       int dummy;
-       if (have_no_fdc)
-               return;
-       if(floppy_grab_irq_and_dma()==0)
-       {
-               lock_fdc(MAXTIMEOUT,0);
-               dummy=fd_eject(0);
-               process_fd_request();
-               floppy_release_irq_and_dma();
-       }
-}
 #endif
index 9849061f045aac78b269f1e930c16a5d983c191f..e5c93889d20912fce10f666290654033ee9ef9f0 100644 (file)
@@ -144,7 +144,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        /*
         * set defaults
         */
-       q->max_segments = MAX_SEGMENTS;
+       q->max_phys_segments = MAX_PHYS_SEGMENTS;
+       q->max_hw_segments = MAX_HW_SEGMENTS;
        q->make_request_fn = mfn;
        blk_queue_max_sectors(q, MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
@@ -170,6 +171,18 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
        unsigned long mb = dma_addr >> 20;
        static request_queue_t *last_q;
 
+       /*
+        * set appropriate bounce gfp mask -- unfortunately we don't have a
+        * full 4GB zone, so we have to resort to low memory for any bounces.
+        * ISA has its own < 16MB zone.
+        */
+       if (dma_addr == BLK_BOUNCE_ISA) {
+               init_emergency_isa_pool();
+               q->bounce_gfp = GFP_NOIO | GFP_DMA;
+               printk("isa pfn %lu, max low %lu, max %lu\n", bounce_pfn, blk_max_low_pfn, blk_max_pfn);
+       } else
+               q->bounce_gfp = GFP_NOHIGHIO;
+
        /*
         * keep this for debugging for now...
         */
@@ -178,7 +191,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
                if (dma_addr == BLK_BOUNCE_ANY)
                        printk("no I/O memory limit\n");
                else
-                       printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (u64) dma_addr);
+                       printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (long long) dma_addr);
        }
 
        q->bounce_pfn = bounce_pfn;
@@ -201,17 +214,34 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 }
 
 /**
- * blk_queue_max_segments - set max segments for a request for this queue
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    data segments in a request
+ *    physical data segments in a request.  This would be the largest sized
+ *    scatter list the driver could handle.
  **/
-void blk_queue_max_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
 {
-       q->max_segments = max_segments;
+       q->max_phys_segments = max_segments;
+}
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    hw data segments in a request.  This would be the largest number of
+ *    address/length pairs the host adapter can actually give as once
+ *    to the device.
+ **/
+void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+{
+       q->max_hw_segments = max_segments;
 }
 
 /**
@@ -325,44 +355,78 @@ static int ll_10byte_cmd_build(request_queue_t *q, struct request *rq)
 void blk_recount_segments(request_queue_t *q, struct bio *bio)
 {
        struct bio_vec *bv, *bvprv = NULL;
-       int i, nr_segs, seg_size, cluster;
+       int i, nr_phys_segs, nr_hw_segs, seg_size, cluster;
 
        if (unlikely(!bio->bi_io_vec))
                return;
 
        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-       seg_size = nr_segs = 0;
+       seg_size = nr_phys_segs = nr_hw_segs = 0;
        bio_for_each_segment(bv, bio, i) {
                if (bvprv && cluster) {
-                       if (seg_size + bv->bv_len > q->max_segment_size)
+                       int phys, seg;
+
+                       if (seg_size + bv->bv_len > q->max_segment_size) {
+                               nr_phys_segs++;
                                goto new_segment;
-                       if (!BIOVEC_MERGEABLE(bvprv, bv))
+                       }
+
+                       phys = BIOVEC_PHYS_MERGEABLE(bvprv, bv);
+                       seg = BIOVEC_SEG_BOUNDARY(q, bvprv, bv);
+                       if (!phys || !seg)
+                               nr_phys_segs++;
+                       if (!seg)
                                goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+
+                       if (!BIOVEC_VIRT_MERGEABLE(bvprv, bv))
                                goto new_segment;
 
                        seg_size += bv->bv_len;
                        bvprv = bv;
                        continue;
+               } else {
+                       nr_phys_segs++;
                }
 new_segment:
-               nr_segs++;
+               nr_hw_segs++;
                bvprv = bv;
-               seg_size = 0;
+               seg_size = bv->bv_len;
        }
 
-       bio->bi_hw_seg = nr_segs;
+       bio->bi_phys_segments = nr_phys_segs;
+       bio->bi_hw_segments = nr_hw_segs;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 
 
-inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
-                           struct bio *nxt)
+inline int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+                                  struct bio *nxt)
+{
+       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+               return 0;
+
+       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+               return 0;
+       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+               return 0;
+
+       /*
+        * bio and nxt are contigous in memory, check if the queue allows
+        * these two to be merged into one
+        */
+       if (BIO_SEG_BOUNDARY(q, bio, nxt))
+               return 1;
+
+       return 0;
+}
+
+inline int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+                                struct bio *nxt)
 {
        if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
                return 0;
 
-       if (!BIO_CONTIG(bio, nxt))
+       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;
        if (bio->bi_size + nxt->bi_size > q->max_segment_size)
                return 0;
@@ -379,7 +443,7 @@ inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
 
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_segments entries
+ * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
 {
@@ -405,7 +469,7 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
                                if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
                                        goto new_segment;
 
-                               if (!BIOVEC_MERGEABLE(bvprv, bvec))
+                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                                        goto new_segment;
@@ -413,11 +477,6 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
                                sg[nsegs - 1].length += nbytes;
                        } else {
 new_segment:
-                               if (nsegs >= q->max_segments) {
-                                       printk("map: %d >= %d, i %d, segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_segments, rq->nr_sectors);
-                                       BUG();
-                               }
-
                                sg[nsegs].address = NULL;
                                sg[nsegs].page = bvec->bv_page;
                                sg[nsegs].length = nbytes;
@@ -436,18 +495,44 @@ new_segment:
  * the standard queue merge functions, can be overridden with device
  * specific ones if so desired
  */
-static inline int ll_new_segment(request_queue_t *q, struct request *req,
-                                struct bio *bio)
+
+static inline int ll_new_mergeable(request_queue_t *q,
+                                  struct request *req,
+                                  struct bio *bio)
 {
-       int nr_segs = bio_hw_segments(q, bio);
+       int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_segments + nr_segs <= q->max_segments) {
-               req->nr_segments += nr_segs;
-               return 1;
+       if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+               req->flags |= REQ_NOMERGE;
+               return 0;
        }
 
-       req->flags |= REQ_NOMERGE;
-       return 0;
+       /*
+        * A hw segment is just getting larger, bump just the phys
+        * counter.
+        */
+       req->nr_phys_segments += nr_phys_segs;
+       return 1;
+}
+
+static inline int ll_new_hw_segment(request_queue_t *q,
+                                   struct request *req,
+                                   struct bio *bio)
+{
+       int nr_hw_segs = bio_hw_segments(q, bio);
+
+       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments) {
+               req->flags |= REQ_NOMERGE;
+               return 0;
+       }
+
+       /*
+        * This will form the start of a new hw segment.  Bump both
+        * counters.
+        */
+       req->nr_hw_segments += nr_hw_segs;
+       req->nr_phys_segments += bio_phys_segments(q, bio);
+       return 1;
 }
 
 static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
@@ -458,7 +543,11 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
                return 0;
        }
 
-       return ll_new_segment(q, req, bio);
+       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail),
+                                 __BVEC_START(bio)))
+               return ll_new_mergeable(q, req, bio);
+
+       return ll_new_hw_segment(q, req, bio);
 }
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
@@ -469,21 +558,49 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
                return 0;
        }
 
-       return ll_new_segment(q, req, bio);
+       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio),
+                                 __BVEC_START(req->bio)))
+               return ll_new_mergeable(q, req, bio);
+
+       return ll_new_hw_segment(q, req, bio);
 }
 
 static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
                                struct request *next)
 {
-       int total_segments = req->nr_segments + next->nr_segments;
+       int total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+       int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 
-       if (blk_contig_segment(q, req->biotail, next->bio))
-               total_segments--;
+       /*
+        * First check if the either of the requests are re-queued
+        * requests.  Can't merge them if they are.
+        */
+       if (req->special || next->special)
+               return 0;
+
+       /*
+        * Will it become to large?
+        */
+       if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+               return 0;
+
+       total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+       if (blk_phys_contig_segment(q, req->biotail, next->bio))
+               total_phys_segments--;
+
+       if (total_phys_segments > q->max_phys_segments)
+               return 0;
+
+       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+       if (blk_hw_contig_segment(q, req->biotail, next->bio))
+               total_hw_segments--;
     
-       if (total_segments > q->max_segments)
+       if (total_hw_segments > q->max_hw_segments)
                return 0;
 
-       req->nr_segments = total_segments;
+       /* Merge is OK... */
+       req->nr_phys_segments = total_phys_segments;
+       req->nr_hw_segments = total_hw_segments;
        return 1;
 }
 
@@ -1107,7 +1224,7 @@ get_rq:
        req->hard_sector = req->sector = sector;
        req->hard_nr_sectors = req->nr_sectors = nr_sectors;
        req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-       req->nr_segments = bio->bi_vcnt;
+       req->nr_phys_segments = bio_phys_segments(q, bio);
        req->nr_hw_segments = bio_hw_segments(q, bio);
        req->buffer = bio_data(bio);    /* see ->buffer comment above */
        req->waiting = NULL;
@@ -1201,7 +1318,7 @@ void generic_make_request(struct bio *bio)
                                printk(KERN_INFO "%s: rw=%ld, want=%ld, limit=%Lu\n",
                                       kdevname(bio->bi_dev), bio->bi_rw,
                                       (sector + nr_sectors)>>1,
-                                      (u64) blk_size[major][minor]);
+                                      (long long) blk_size[major][minor]);
                        }
                        set_bit(BIO_EOF, &bio->bi_flags);
                        goto end_io;
@@ -1221,7 +1338,7 @@ void generic_make_request(struct bio *bio)
                if (!q) {
                        printk(KERN_ERR
                               "generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
-                              kdevname(bio->bi_dev), (u64) bio->bi_sector);
+                              kdevname(bio->bi_dev), (long long) bio->bi_sector);
 end_io:
                        bio->bi_end_io(bio, nr_sectors);
                        break;
@@ -1433,7 +1550,27 @@ sorry:
 extern int stram_device_init (void);
 #endif
 
-inline void blk_recalc_request(struct request *rq, int nsect)
+inline void blk_recalc_rq_segments(struct request *rq)
+{
+       struct bio *bio;
+       int nr_phys_segs, nr_hw_segs;
+
+       rq->buffer = bio_data(rq->bio);
+
+       nr_phys_segs = nr_hw_segs = 0;
+       rq_for_each_bio(bio, rq) {
+               /* Force bio hw/phys segs to be recalculated. */
+               bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+               nr_phys_segs += bio_phys_segments(rq->q, bio);
+               nr_hw_segs += bio_hw_segments(rq->q, bio);
+       }
+
+       rq->nr_phys_segments = nr_phys_segs;
+       rq->nr_hw_segments = nr_hw_segs;
+}
+
+inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
        rq->hard_sector += nsect;
        rq->hard_nr_sectors -= nsect;
@@ -1451,8 +1588,6 @@ inline void blk_recalc_request(struct request *rq, int nsect)
                printk("blk: request botched\n");
                rq->nr_sectors = rq->current_nr_sectors;
        }
-
-       rq->buffer = bio_data(rq->bio);
 }
 
 /**
@@ -1495,7 +1630,8 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
                        bio->bi_size -= residual;
                        bio_iovec(bio)->bv_offset += residual;
                        bio_iovec(bio)->bv_len -= residual;
-                       blk_recalc_request(req, nr_sectors);
+                       blk_recalc_rq_sectors(req, nr_sectors);
+                       blk_recalc_rq_segments(req);
                        return 1;
                }
 
@@ -1518,13 +1654,15 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
                }
 
                if ((bio = req->bio)) {
-                       blk_recalc_request(req, nsect);
+                       blk_recalc_rq_sectors(req, nsect);
 
                        /*
                         * end more in this run, or just return 'not-done'
                         */
-                       if (unlikely(nr_sectors <= 0))
+                       if (unlikely(nr_sectors <= 0)) {
+                               blk_recalc_rq_segments(req);
                                return 1;
+                       }
                }
        }
 
@@ -1605,7 +1743,8 @@ EXPORT_SYMBOL(generic_unplug_device);
 EXPORT_SYMBOL(blk_attempt_remerge);
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_queue_max_sectors);
-EXPORT_SYMBOL(blk_queue_max_segments);
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 EXPORT_SYMBOL(blk_queue_segment_boundary);
@@ -1613,5 +1752,6 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
 EXPORT_SYMBOL(submit_bio);
-EXPORT_SYMBOL(blk_contig_segment);
 EXPORT_SYMBOL(blk_queue_assign_lock);
+EXPORT_SYMBOL(blk_phys_contig_segment);
+EXPORT_SYMBOL(blk_hw_contig_segment);
index c16b6163af895ba98957511ed66fea112d01ba5a..38b2514d71d083a3a9b8355d567bfae956ee103c 100644 (file)
@@ -62,7 +62,7 @@ static u64 nbd_bytesizes[MAX_NBD];
 static struct nbd_device nbd_dev[MAX_NBD];
 static devfs_handle_t devfs_handle;
 
-static spinlock_t nbd_lock;
+static spinlock_t nbd_lock = SPIN_LOCK_UNLOCKED;
 
 #define DEBUG( s )
 /* #define DEBUG( s ) printk( s ) 
index 1430fcb800109c85d62e7f12b82eb929432021c1..8e1374c18d66c226a00f036aab23ba9112e66fd2 100644 (file)
@@ -166,6 +166,8 @@ static int pd_drive_count;
 
 #include <asm/uaccess.h>
 
+static spinlock_t pd_lock = SPIN_LOCK_UNLOCKED;
+
 #ifndef MODULE
 
 #include "setup.h"
@@ -394,7 +396,7 @@ int pd_init (void)
                 return -1;
         }
        q = BLK_DEFAULT_QUEUE(MAJOR_NR);
-       blk_init_queue(q, DEVICE_REQUEST);
+       blk_init_queue(q, DEVICE_REQUEST, &pd_lock);
        blk_queue_max_sectors(q, cluster);
         read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
         
@@ -875,9 +877,9 @@ static void pd_next_buf( int unit )
 
 {      long    saved_flags;
 
-       spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+       spin_lock_irqsave(&pd_lock,saved_flags);
        end_request(1);
-       if (!pd_run) {  spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+       if (!pd_run) {  spin_unlock_irqrestore(&pd_lock,saved_flags);
                        return; 
        }
        
@@ -893,7 +895,7 @@ static void pd_next_buf( int unit )
 
        pd_count = CURRENT->current_nr_sectors;
        pd_buf = CURRENT->buffer;
-       spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+       spin_unlock_irqrestore(&pd_lock,saved_flags);
 }
 
 static void do_pd_read( void )
@@ -916,11 +918,11 @@ static void do_pd_read_start( void )
                         pi_do_claimed(PI,do_pd_read_start);
                        return;
                 }
-               spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+               spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
                do_pd_request(NULL);
-               spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+               spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
         }
         pd_ide_command(unit,IDE_READ,pd_block,pd_run);
@@ -940,11 +942,11 @@ static void do_pd_read_drq( void )
                         pi_do_claimed(PI,do_pd_read_start);
                         return;
                 }
-               spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+               spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
                do_pd_request(NULL);
-               spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+               spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
             }
             pi_read_block(PI,pd_buf,512);
@@ -955,11 +957,11 @@ static void do_pd_read_drq( void )
            if (!pd_count) pd_next_buf(unit);
         }
         pi_disconnect(PI);
-       spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+       spin_lock_irqsave(&pd_lock,saved_flags);
         end_request(1);
         pd_busy = 0;
        do_pd_request(NULL);
-       spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+       spin_unlock_irqrestore(&pd_lock,saved_flags);
 }
 
 static void do_pd_write( void )
@@ -982,11 +984,11 @@ static void do_pd_write_start( void )
                        pi_do_claimed(PI,do_pd_write_start);
                         return;
                 }
-               spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+               spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
                do_pd_request(NULL);
-               spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+               spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
         }
         pd_ide_command(unit,IDE_WRITE,pd_block,pd_run);
@@ -998,11 +1000,11 @@ static void do_pd_write_start( void )
                         pi_do_claimed(PI,do_pd_write_start);
                         return;
                 }
-               spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+               spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
                do_pd_request(NULL);
-                spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+                spin_unlock_irqrestore(&pd_lock,saved_flags);
                return;
             }
             pi_write_block(PI,pd_buf,512);
@@ -1027,19 +1029,19 @@ static void do_pd_write_done( void )
                         pi_do_claimed(PI,do_pd_write_start);
                         return;
                 }
-               spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+               spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
                do_pd_request(NULL);
-               spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+               spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
         }
         pi_disconnect(PI);
-       spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+       spin_lock_irqsave(&pd_lock,saved_flags);
         end_request(1);
         pd_busy = 0;
        do_pd_request(NULL);
-       spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+       spin_unlock_irqrestore(&pd_lock,saved_flags);
 }
 
 /* end of pd.c */
index e49565417eda42eb43ea0078c8659f5f85de8c0e..c83901c70d366811f52cff658de76ef288ffcfae 100644 (file)
@@ -361,7 +361,8 @@ int pf_init (void)      /* preliminary initialisation */
         }
        q = BLK_DEFAULT_QUEUE(MAJOR_NR);
        blk_init_queue(q, DEVICE_REQUEST, &pf_spin_lock);
-       blk_queue_max_segments(q, cluster);
+       blk_queue_max_phys_segments(q, cluster);
+       blk_queue_max_hw_segments(q, cluster);
         read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
         
        for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
index b248b437bf7481043a91c2d694625a6a9afd561f..dec02f0f832eb744db2dbc81d87058fcab6e23c4 100644 (file)
@@ -66,8 +66,6 @@
 #define TYPE_0_CMD_BLK_LENGTH 2
 #define TYPE_1_CMD_BLK_LENGTH 4
 
-#define PS2ESDI_LOCK (&((BLK_DEFAULT_QUEUE(MAJOR_NR))->queue_lock))
-
 static void reset_ctrl(void);
 
 int ps2esdi_init(void);
@@ -130,6 +128,7 @@ static int intg_esdi = 0;       /* If integrated adapter */
 struct ps2esdi_i_struct {
        unsigned int head, sect, cyl, wpcom, lzone, ctl;
 };
+static spinlock_t ps2esdi_lock = SPIN_LOCK_UNLOCKED;
 
 #if 0
 #if 0                          /* try both - I don't know which one is better... UB */
@@ -180,7 +179,7 @@ int __init ps2esdi_init(void)
                return -1;
        }
        /* set up some global information - indicating device specific info */
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &ps2esdi_lock);
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
 
        /* some minor housekeeping - setup the global gendisk structure */
@@ -954,10 +953,10 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
                break;
        }
        if(ending != -1) {
-               spin_lock_irqsave(PS2ESDI_LOCK, flags);
+               spin_lock_irqsave(&ps2esdi_lock, flags);
                end_request(ending);
                do_ps2esdi_request(BLK_DEFAULT_QUEUE(MAJOR_NR));
-               spin_unlock_irqrestore(PS2ESDI_LOCK, flags);
+               spin_unlock_irqrestore(&ps2esdi_lock, flags);
        }
 }                              /* handle interrupts */
 
index f4dee49d40f29c396d2c4e42839f1b1b7d4c0b3d..ad3ead3611b5d6bd534944d80b0891c523ed6793 100644 (file)
@@ -203,6 +203,7 @@ struct floppy_state {
 
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
+static spinlock_t swim3_lock = SPIN_LOCK_UNLOCKED;
 
 static unsigned short write_preamble[] = {
        0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */
@@ -807,16 +808,6 @@ static int fd_eject(struct floppy_state *fs)
        return err;
 }
 
-int swim3_fd_eject(int devnum)
-{
-       if (devnum >= floppy_count)
-               return -ENODEV;
-       /* Do not check this - this function should ONLY be called early
-        * in the boot process! */
-       /* if (floppy_states[devnum].ref_count != 1) return -EBUSY; */
-       return fd_eject(&floppy_states[devnum]);
-}
-
 static struct floppy_struct floppy_type =
        { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };    /*  7 1.44MB 3.5"   */
 
@@ -1041,7 +1032,7 @@ int swim3_init(void)
                               MAJOR_NR);
                        return -EBUSY;
                }
-               blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+               blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST,&swim3_lock);
                blksize_size[MAJOR_NR] = floppy_blocksizes;
                blk_size[MAJOR_NR] = floppy_sizes;
        }
index 29b8f8213dea10b3d600b2c4c6037172e783a1a3..0fbe21e399869ec9444aebb55589f7e6a5268576 100644 (file)
@@ -84,6 +84,8 @@ static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_blocksizes[2] = {512,512};
 static int floppy_sizes[2] = {2880,2880};
 
+static spinlock_t swim_iop_lock = SPIN_LOCK_UNLOCKED;
+
 static char *drive_names[7] = {
        "not installed",        /* DRV_NONE    */
        "unknown (1)",          /* DRV_UNKNOWN */
@@ -147,7 +149,7 @@ int swimiop_init(void)
                       MAJOR_NR);
                return -EBUSY;
        }
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &swim_iop_lock);
        blksize_size[MAJOR_NR] = floppy_blocksizes;
        blk_size[MAJOR_NR] = floppy_sizes;
 
index 4357b317b28c4a4555940421bf801cb1440c1ac9..55587b4640ad8cd9745b1c3efac99ba3cc0dc8b9 100644 (file)
@@ -122,6 +122,8 @@ static struct hd_struct xd_struct[XD_MAXDRIVES << 6];
 static int xd_sizes[XD_MAXDRIVES << 6], xd_access[XD_MAXDRIVES];
 static int xd_blocksizes[XD_MAXDRIVES << 6];
 
+static spinlock_t xd_lock = SPIN_LOCK_UNLOCKED;
+
 extern struct block_device_operations xd_fops;
 
 static struct gendisk xd_gendisk = {
@@ -170,7 +172,7 @@ int __init xd_init (void)
                return -1;
        }
        devfs_handle = devfs_mk_dir (NULL, xd_gendisk.major_name, NULL);
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &xd_lock);
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
        add_gendisk(&xd_gendisk);
        xd_geninit();
index e050e31ce70583cc5511ace9077985631ffb3c9c..f7d35d7305d8fbb7cdf45862ce90e848968c17c4 100644 (file)
@@ -68,6 +68,8 @@ static int chip_count       = 0;
 static int list_count       = 0;
 static int current_device   = -1;
 
+static spinlock_t z2ram_lock = SPIN_LOCK_UNLOCKED;
+
 static void
 do_z2_request( request_queue_t * q )
 {
@@ -364,7 +366,7 @@ z2_init( void )
            }
     }    
    
-    blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+    blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &z2ram_lock);
     blksize_size[ MAJOR_NR ] = z2_blocksizes;
     blk_size[ MAJOR_NR ] = z2_sizes;
 
index 6a128292379a79dbb49d903a70689a7a3b7592c0..9068ede28f2b606d12c1ee6b7e39f5afd2af0836 100644 (file)
@@ -767,8 +767,12 @@ void fcp_release(fc_channel *fcchain, int count)  /* count must > 0 */
 
 static void fcp_scsi_done (Scsi_Cmnd *SCpnt)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&SCpnt->host->host_lock, flags);
        if (FCP_CMND(SCpnt)->done)
                FCP_CMND(SCpnt)->done(SCpnt);
+       spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 }
 
 static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, int prepare)
@@ -913,8 +917,12 @@ int fcp_scsi_abort(Scsi_Cmnd *SCpnt)
         */
 
        if (++fc->abort_count < (fc->can_queue >> 1)) {
+               unsigned long flags;
+
                SCpnt->result = DID_ABORT;
+               spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                fcmd->done(SCpnt);
+               spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                printk("FC: soft abort\n");
                return SUCCESS;
        } else {
index 924de1cb7eb054dbf126fff3b92714bd38440904..19aee0628adfb0c018f578b8ebbe927a5ff8a7b1 100644 (file)
@@ -341,14 +341,14 @@ static void soc_intr(int irq, void *dev_id, struct pt_regs *regs)
        unsigned long flags;
        register struct soc *s = (struct soc *)dev_id;
 
-       spin_lock_irqsave(&io_request_lock, flags);
+       spin_lock_irqsave(&s->lock, flags);
        cmd = sbus_readl(s->regs + CMD);
        for (; (cmd = SOC_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) {
                if (cmd & SOC_CMD_RSP_Q1) soc_unsolicited (s);
                if (cmd & SOC_CMD_RSP_Q0) soc_solicited (s);
                if (cmd & SOC_CMD_REQ_QALL) soc_request (s, cmd);
        }
-       spin_unlock_irqrestore(&io_request_lock, flags);
+       spin_unlock_irqrestore(&s->lock, flags);
 }
 
 #define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port))
@@ -559,6 +559,7 @@ static inline void soc_init(struct sbus_dev *sdev, int no)
        if (s == NULL)
                return;
        memset (s, 0, sizeof(struct soc));
+       spin_lock_init(&s->lock);
        s->soc_no = no;
 
        SOD(("socs %08lx soc_intr %08lx soc_hw_enque %08x\n",
index 740e1a3955296505222d04f8e072d0fccaf8915f..c9c6d1d9d95912879b5c2290a6cad4155e016125 100644 (file)
@@ -265,6 +265,7 @@ typedef struct {
 } soc_cq;
 
 struct soc {
+       spinlock_t              lock;
        soc_port                port[2]; /* Every SOC has one or two FC ports */
        soc_cq                  req[2]; /* Request CQs */
        soc_cq                  rsp[2]; /* Response CQs */
index bec5167335861cc6be7e8af5891eb78697238c1a..447a4de67f6a2af12c5818a7a9718cbc18878ab9 100644 (file)
@@ -411,7 +411,7 @@ static void socal_intr(int irq, void *dev_id, struct pt_regs *regs)
        unsigned long flags;
        register struct socal *s = (struct socal *)dev_id;
 
-       spin_lock_irqsave(&io_request_lock, flags);
+       spin_lock_irqsave(&s->lock, flags);
        cmd = sbus_readl(s->regs + CMD);
        for (; (cmd = SOCAL_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) {
 #ifdef SOCALDEBUG
@@ -428,7 +428,7 @@ static void socal_intr(int irq, void *dev_id, struct pt_regs *regs)
                if (cmd & SOCAL_CMD_REQ_QALL)
                        socal_request (s, cmd);
        }
-       spin_unlock_irqrestore(&io_request_lock, flags);
+       spin_unlock_irqrestore(&s->lock, flags);
 }
 
 #define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port))
@@ -667,6 +667,7 @@ static inline void socal_init(struct sbus_dev *sdev, int no)
        s = kmalloc (sizeof (struct socal), GFP_KERNEL);
        if (!s) return;
        memset (s, 0, sizeof(struct socal));
+       spin_lock_init(&s->lock);
        s->socal_no = no;
 
        SOD(("socals %08lx socal_intr %08lx socal_hw_enque %08lx\n",
index 8e8c7f4519829649f9dd360dd9e6867bc32838ed..a853fad92a7559c5a41a26ae031a57e8862c96ae 100644 (file)
@@ -290,6 +290,7 @@ typedef struct {
 } socal_cq;
 
 struct socal {
+       spinlock_t              lock;
        socal_port              port[2]; /* Every SOCAL has one or two FC ports */
        socal_cq                req[4]; /* Request CQs */
        socal_cq                rsp[4]; /* Response CQs */
index 38c0777473eaa9644f51e80a1f46ae63244cf2f3..08485cf66fe358900bbe7a2bd4fe391670e0db9d 100644 (file)
@@ -62,6 +62,8 @@
 #define HD_IRQ IRQ_HARDDISK
 #endif
 
+static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
+
 static int revalidate_hddisk(kdev_t, int);
 
 #define        HD_DELAY        0
@@ -106,7 +108,7 @@ static int NR_HD;
 static struct hd_struct hd[MAX_HD<<6];
 static int hd_sizes[MAX_HD<<6];
 static int hd_blocksizes[MAX_HD<<6];
-static int hd_hardsectsizes[MAX_HD<<6];
+
 
 static struct timer_list device_timer;
 
@@ -464,7 +466,7 @@ ok_to_write:
        i = --CURRENT->nr_sectors;
        --CURRENT->current_nr_sectors;
        CURRENT->buffer += 512;
-       if (!i || (CURRENT->bh && !SUBSECTOR(i)))
+       if (!i || (CURRENT->bio && !SUBSECTOR(i)))
                end_request(1);
        if (i > 0) {
                SET_INTR(&write_intr);
@@ -586,24 +588,29 @@ repeat:
                dev+'a', (CURRENT->cmd == READ)?"read":"writ",
                cyl, head, sec, nsect, (unsigned long) CURRENT->buffer);
 #endif
-       if (CURRENT->cmd == READ) {
-               hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
-               if (reset)
-                       goto repeat;
-               return;
-       }
-       if (CURRENT->cmd == WRITE) {
-               hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
-               if (reset)
-                       goto repeat;
-               if (wait_DRQ()) {
-                       bad_rw_intr();
-                       goto repeat;
+       if(CURRENT->flags & REQ_CMD) {
+               switch (rq_data_dir(CURRENT)) {
+               case READ:
+                       hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
+                       if (reset)
+                               goto repeat;
+                       break;
+               case WRITE:
+                       hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
+                       if (reset)
+                               goto repeat;
+                       if (wait_DRQ()) {
+                               bad_rw_intr();
+                               goto repeat;
+                       }
+                       outsw(HD_DATA,CURRENT->buffer,256);
+                       break;
+               default:
+                       printk("unknown hd-command\n");
+                       end_request(0);
+                       break;
                }
-               outsw(HD_DATA,CURRENT->buffer,256);
-               return;
        }
-       panic("unknown hd-command");
 }
 
 static void do_hd_request (request_queue_t * q)
@@ -723,12 +730,11 @@ static void __init hd_geninit(void)
 {
        int drive;
 
-       for(drive=0; drive < (MAX_HD << 6); drive++) {
+       for(drive=0; drive < (MAX_HD << 6); drive++)
                hd_blocksizes[drive] = 1024;
-               hd_hardsectsizes[drive] = 512;
-       }
+
        blksize_size[MAJOR_NR] = hd_blocksizes;
-       hardsect_size[MAJOR_NR] = hd_hardsectsizes;
+       blk_queue_hardsect_size(QUEUE, 512);
 
 #ifdef __i386__
        if (!NR_HD) {
@@ -830,7 +836,7 @@ int __init hd_init(void)
                printk("hd: unable to get major %d for hard disk\n",MAJOR_NR);
                return -1;
        }
-       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+       blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &hd_lock);
        blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), 255);
        read_ahead[MAJOR_NR] = 8;               /* 8 sector (4kB) read-ahead */
        add_gendisk(&hd_gendisk);
index ecdcd85d5d28ecafa6e959e8500b93cdf99be77a..fb638ca3065855ff8cd710d71ddea37549cd6fa3 100644 (file)
@@ -232,8 +232,8 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
 
        nents = blk_rq_map_sg(q, rq, hwif->sg_table);
 
-       if (rq->q && nents > rq->nr_segments)
-               printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
+       if (rq->q && nents > rq->nr_phys_segments)
+               printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
 
        if (rq_data_dir(rq) == READ)
                hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
index 6201c2d1600d05dc826e0368c541f576b0be659a..3f93dcc90fd318d275f5c1fb7854b54d462b4a0c 100644 (file)
@@ -608,8 +608,11 @@ static void ide_init_queue(ide_drive_t *drive)
 #endif
        blk_queue_max_sectors(q, max_sectors);
 
-       /* IDE DMA can do PRD_ENTRIES number of segments */
-       q->max_segments = PRD_ENTRIES;
+       /* IDE DMA can do PRD_ENTRIES number of segments. */
+       blk_queue_max_hw_segments(q, PRD_ENTRIES);
+
+       /* This is a driver limit and could be eliminated. */
+       blk_queue_max_phys_segments(q, PRD_ENTRIES);
 }
 
 /*
index c1b19e1d925559cbe5b01f0dd0524955232587ed..c4eb0a4a3c62080dec26ee85b7b8d5ed1320db35 100644 (file)
@@ -3686,6 +3686,7 @@ EXPORT_SYMBOL(ide_spin_wait_hwgroup);
  */
 devfs_handle_t ide_devfs_handle;
 
+EXPORT_SYMBOL(ide_lock);
 EXPORT_SYMBOL(ide_probe);
 EXPORT_SYMBOL(drive_is_flashcard);
 EXPORT_SYMBOL(ide_timer_expiry);
@@ -3718,6 +3719,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
 EXPORT_SYMBOL(ide_end_drive_cmd);
 EXPORT_SYMBOL(ide_end_request);
 EXPORT_SYMBOL(__ide_end_request);
+EXPORT_SYMBOL(ide_revalidate_drive);
 EXPORT_SYMBOL(ide_revalidate_disk);
 EXPORT_SYMBOL(ide_cmd);
 EXPORT_SYMBOL(ide_wait_cmd);
index 372583a05c2160a8361ee160f6ddcd4bed68dfc7..6044b053111009c13cc91c097b78cb3ec3d2d911 100644 (file)
@@ -69,6 +69,7 @@
 #include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/major.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
index 1e7c77b395afa2aae1931171eab2f6a99a406db8..9b6d48e9e3e9153845e069694d7f4b70697696ac 100644 (file)
@@ -65,7 +65,7 @@
 #include <linux/errno.h>
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
-#include <linux/blk.h>         /* for io_request_lock (spinlock) decl */
+#include <linux/blk.h>
 #include "../../scsi/scsi.h"
 #include "../../scsi/hosts.h"
 #include "../../scsi/sd.h"
@@ -246,9 +246,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r)
                mf_chk = search_taskQ(1,sc,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
                if (mf_chk != NULL) {
                        sc->result = DID_ABORT << 16;
-                       spin_lock_irqsave(&io_request_lock, flags);
+                       spin_lock_irqsave(&sc->host->host_lock, flags);
                        sc->scsi_done(sc);
-                       spin_unlock_irqrestore(&io_request_lock, flags);
+                       spin_unlock_irqrestore(&sc->host->host_lock, flags);
                        return 1;
                }
        }
@@ -426,9 +426,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r)
                                         scsi_to_pci_dma_dir(sc->sc_data_direction));
                }
 
-               spin_lock_irqsave(&io_request_lock, flags);
+               spin_lock_irqsave(&sc->host->host_lock, flags);
                sc->scsi_done(sc);
-               spin_unlock_irqrestore(&io_request_lock, flags);
+               spin_unlock_irqrestore(&sc->host->host_lock, flags);
        }
 
        return 1;
@@ -928,9 +928,9 @@ mptscsih_qcmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
                        }
                        SCpnt->resid = SCpnt->request_bufflen - mpt_sdev->sense_sz;
                        SCpnt->result = 0;
-/*                     spin_lock(&io_request_lock);    */
+/*                     spin_lock(&SCpnt->host->host_lock);     */
                        SCpnt->scsi_done(SCpnt);
-/*                     spin_unlock(&io_request_lock);  */
+/*                     spin_unlock(&SCpnt->host->host_lock);   */
                        return 0;
                }
        }
@@ -1333,9 +1333,9 @@ mptscsih_abort(Scsi_Cmnd * SCpnt)
        if (ctx2abort == -1) {
                printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#2) for SCpnt=%p\n", SCpnt);
                SCpnt->result = DID_SOFT_ERROR << 16;
-               spin_lock_irqsave(&io_request_lock, flags);
+               spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                SCpnt->scsi_done(SCpnt);
-               spin_unlock_irqrestore(&io_request_lock, flags);
+               spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
        } else {
                dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort));
@@ -1352,9 +1352,9 @@ mptscsih_abort(Scsi_Cmnd * SCpnt)
                                        ": WARNING[2] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
                                        i, mf, SCpnt);
                        SCpnt->result = DID_SOFT_ERROR << 16;
-                       spin_lock_irqsave(&io_request_lock, flags);
+                       spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                        SCpnt->scsi_done(SCpnt);
-                       spin_unlock_irqrestore(&io_request_lock, flags);
+                       spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                        mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
                }
        }
@@ -1428,9 +1428,9 @@ mptscsih_dev_reset(Scsi_Cmnd * SCpnt)
                                ": WARNING[3] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
                                i, mf, SCpnt);
                SCpnt->result = DID_SOFT_ERROR << 16;
-               spin_lock_irqsave(&io_request_lock, flags);
+               spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                SCpnt->scsi_done(SCpnt);
-               spin_unlock_irqrestore(&io_request_lock, flags);
+               spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
        }
 
@@ -1502,9 +1502,9 @@ mptscsih_bus_reset(Scsi_Cmnd * SCpnt)
                                ": WARNING[4] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
                                i, mf, SCpnt);
                SCpnt->result = DID_SOFT_ERROR << 16;
-               spin_lock_irqsave(&io_request_lock, flags);
+               spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                SCpnt->scsi_done(SCpnt);
-               spin_unlock_irqrestore(&io_request_lock, flags);
+               spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
        }
 
@@ -1748,9 +1748,9 @@ mptscsih_taskmgmt_bh(void *sc)
                        if (ctx2abort == -1) {
                                printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#1) for SCpnt=%p\n", SCpnt);
                                SCpnt->result = DID_SOFT_ERROR << 16;
-                               spin_lock_irqsave(&io_request_lock, flags);
+                               spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                                SCpnt->scsi_done(SCpnt);
-                               spin_unlock_irqrestore(&io_request_lock, flags);
+                               spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                                mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
                                continue;
                        }
@@ -1797,9 +1797,9 @@ mptscsih_taskmgmt_bh(void *sc)
                    != 0) {
                        printk(KERN_WARNING MYNAM ": WARNING[1] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", i, mf, SCpnt);
                        SCpnt->result = DID_SOFT_ERROR << 16;
-                       spin_lock_irqsave(&io_request_lock, flags);
+                       spin_lock_irqsave(&SCpnt->host->host_lock, flags);
                        SCpnt->scsi_done(SCpnt);
-                       spin_unlock_irqrestore(&io_request_lock, flags);
+                       spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
                        mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
                } else {
                        /* Spin-Wait for TaskMgmt complete!!! */
index bdf52592bd26b17d49519acfe7d4fc07a9009943..c64b7393b484515b74f66f0b0a5b990458cb7ace 100644 (file)
@@ -1301,7 +1301,8 @@ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, i
                request_queue_t *q = i2ob_dev[unit].req_queue;
 
                blk_queue_max_sectors(q, 256);
-               blk_queue_max_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+               blk_queue_max_phys_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+               blk_queue_max_hw_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
 
                if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 2)
                        i2ob_dev[i].depth = 32;
@@ -1309,14 +1310,16 @@ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, i
                if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 1)
                {
                        blk_queue_max_sectors(q, 32);
-                       blk_queue_max_segments(q, 8);
+                       blk_queue_max_phys_segments(q, 8);
+                       blk_queue_max_hw_segments(q, 8);
                        i2ob_dev[i].depth = 4;
                }
 
                if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)
                {
                        blk_queue_max_sectors(q, 8);
-                       blk_queue_max_segments(q, 8);
+                       blk_queue_max_phys_segments(q, 8);
+                       blk_queue_max_hw_segments(q, 8);
                }
        }
 
index f3528a29e44cd9e47829502774578cd9991efe9a..2e51889ac9705de4efc50de517e26c252643cc99 100644 (file)
@@ -135,7 +135,7 @@ obj-$(CONFIG_CHR_DEV_SG)    += sg.o
 list-multi     := scsi_mod.o sd_mod.o sr_mod.o initio.o a100u2w.o cpqfc.o
 scsi_mod-objs  := scsi.o hosts.o scsi_ioctl.o constants.o scsicam.o \
                        scsi_proc.o scsi_error.o scsi_queue.o scsi_lib.o \
-                       scsi_merge.o scsi_dma.o scsi_scan.o scsi_syms.o
+                       scsi_merge.o scsi_scan.o scsi_syms.o
 sd_mod-objs    := sd.o
 sr_mod-objs    := sr.o sr_ioctl.o sr_vendor.o
 initio-objs    := ini9100u.o i91uscsi.o
index 206233d1af2da78b5792992f6d5118f3d1c7cf59..514c2be6ba12ec4b9f00993cc4dc0fd309003922 100644 (file)
@@ -1,6 +1,6 @@
 The Linux NCR53C8XX/SYM53C8XX drivers README file
 
-Written by Gerard Roudier <groudier@club-internet.fr>
+Written by Gerard Roudier <groudier@free.fr>
 21 Rue Carnot
 95170 DEUIL LA BARRE - FRANCE
 
@@ -87,7 +87,7 @@ Written by Gerard Roudier <groudier@club-internet.fr>
 
 The initial Linux ncr53c8xx driver has been a port of the ncr driver from 
 FreeBSD that has been achieved in November 1995 by:
-          Gerard Roudier              <groudier@club-internet.fr>
+          Gerard Roudier              <groudier@free.fr>
 
 The original driver has been written for 386bsd and FreeBSD by:
           Wolfgang Stanglmeier        <wolf@cologne.de>
@@ -1287,7 +1287,7 @@ appropriate mailing lists or news-groups.  Send me a copy in order to
 be sure I will receive it.  Obviously, a bug in the driver code is
 possible.
 
-     My email address: Gerard Roudier <groudier@club-internet.fr>
+     My email address: Gerard Roudier <groudier@free.fr>
 
 Allowing disconnections is important if you use several devices on
 your SCSI bus but often causes problems with buggy devices.
index be4495fdf7ab00552f03729bbf2cbc763a2f9d25..9177efb7342b30dbfd5524648c34e4121ea9e202 100644 (file)
@@ -3084,7 +3084,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
        * we check data_cmnd[0].  This catches the conditions for st.c, but
        * I'm still not sure if request.cmd is valid for sg devices.
        */
-      if ( (cmd->request.cmd == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
+      if ( (rq_data_dir(&cmd->request) == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
            (cmd->data_cmnd[0] == WRITE_FILEMARKS) )
       {
         sp->w_total++;
@@ -4294,7 +4294,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
       {
         printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
           "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
-          (cmd->request.cmd == WRITE) ? "wrote" : "read", actual,
+          (rq_data_dir(&cmd->request) == WRITE) ? "wrote" : "read", actual,
           hscb->residual_SG_segment_count);
         printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb),
           hscb->target_status);
index 0ec9d562bd3d4d3511166b485cee23de8a4593a8..3f5bbb08cb4a6e49120eb2f52e8d281ea9550c08 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: esp.c,v 1.99 2001/02/13 01:17:01 davem Exp $
+/* $Id: esp.c,v 1.100 2001/12/11 04:55:48 davem Exp $
  * esp.c:  EnhancedScsiProcessor Sun SCSI driver code.
  *
  * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu)
@@ -1035,9 +1035,6 @@ static void __init esp_init_swstate(struct esp *esp)
 {
        int i;
 
-       /* Driver spinlock... */
-       spin_lock_init(&esp->lock);
-
        /* Command queues... */
        esp->current_SC = NULL;
        esp->disconnected_SC = NULL;
@@ -1816,7 +1813,6 @@ after_nego_msg_built:
 int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
 {
        struct esp *esp;
-       unsigned long flags;
 
        /* Set up func ptr and initial driver cmd-phase. */
        SCpnt->scsi_done = done;
@@ -1834,8 +1830,6 @@ int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
        SCpnt->SCp.Message          = 0xff;
        SCpnt->SCp.sent_command     = 0;
 
-       spin_lock_irqsave(&esp->lock, flags);
-
        /* Place into our queue. */
        if (SCpnt->cmnd[0] == REQUEST_SENSE) {
                ESPQUEUE(("RQSENSE\n"));
@@ -1849,8 +1843,6 @@ int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
        if (!esp->current_SC && !esp->resetting_bus)
                esp_exec_cmd(esp);
 
-       spin_unlock_irqrestore(&esp->lock, flags);
-
        return 0;
 }
 
@@ -1926,7 +1918,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
        unsigned long flags;
        int don;
 
-       spin_lock_irqsave(&esp->lock, flags);
+       spin_lock_irqsave(&esp->ehost->host_lock, flags);
 
        ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
        esp_dump_state(esp);
@@ -1942,7 +1934,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
                esp->msgout_len = 1;
                esp->msgout_ctr = 0;
                esp_cmd(esp, ESP_CMD_SATN);
-               spin_unlock_irqrestore(&esp->lock, flags);
+               spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
                return SCSI_ABORT_PENDING;
        }
 
@@ -1964,14 +1956,14 @@ int esp_abort(Scsi_Cmnd *SCptr)
                                *prev = (Scsi_Cmnd *) this->host_scribble;
                                this->host_scribble = NULL;
 
-                               spin_unlock_irqrestore(&esp->lock, flags);
-
                                esp_release_dmabufs(esp, this);
                                this->result = DID_ABORT << 16;
                                this->scsi_done(this);
+
                                if (don)
                                        ESP_INTSON(esp->dregs);
 
+                               spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
                                return SCSI_ABORT_SUCCESS;
                        }
                }
@@ -1985,7 +1977,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
        if (esp->current_SC) {
                if (don)
                        ESP_INTSON(esp->dregs);
-               spin_unlock_irqrestore(&esp->lock, flags);
+               spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
                return SCSI_ABORT_BUSY;
        }
 
@@ -1998,7 +1990,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
 
        if (don)
                ESP_INTSON(esp->dregs);
-       spin_unlock_irqrestore(&esp->lock, flags);
+       spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
        return SCSI_ABORT_SNOOZE;
 }
 
@@ -2014,16 +2006,11 @@ static int esp_finish_reset(struct esp *esp)
        /* Clean up currently executing command, if any. */
        if (sp != NULL) {
                esp->current_SC = NULL;
-               spin_unlock(&esp->lock);
 
                esp_release_dmabufs(esp, sp);
                sp->result = (DID_RESET << 16);
 
-               spin_lock(&io_request_lock);
                sp->scsi_done(sp);
-               spin_unlock(&io_request_lock);
-
-               spin_lock(&esp->lock);
        }
 
        /* Clean up disconnected queue, they have been invalidated
@@ -2031,16 +2018,10 @@ static int esp_finish_reset(struct esp *esp)
         */
        if (esp->disconnected_SC) {
                while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
-                       spin_unlock(&esp->lock);
-
                        esp_release_dmabufs(esp, sp);
                        sp->result = (DID_RESET << 16);
 
-                       spin_lock(&io_request_lock);
                        sp->scsi_done(sp);
-                       spin_unlock(&io_request_lock);
-
-                       spin_lock(&esp->lock);
                }
        }
 
@@ -2071,9 +2052,9 @@ int esp_reset(Scsi_Cmnd *SCptr, unsigned int how)
        struct esp *esp = (struct esp *) SCptr->host->hostdata;
        unsigned long flags;
 
-       spin_lock_irqsave(&esp->lock, flags);
+       spin_lock_irqsave(&esp->ehost->host_lock, flags);
        (void) esp_do_resetbus(esp);
-       spin_unlock_irqrestore(&esp->lock, flags);
+       spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 
        return SCSI_RESET_PENDING;
 }
@@ -2085,16 +2066,12 @@ static void esp_done(struct esp *esp, int error)
 
        esp->current_SC = NULL;
 
-       spin_unlock(&esp->lock);
        esp_release_dmabufs(esp, done_SC);
        done_SC->result = error;
 
-       spin_lock(&io_request_lock);
        done_SC->scsi_done(done_SC);
-       spin_unlock(&io_request_lock);
 
        /* Bus is free, issue any commands in the queue. */
-       spin_lock(&esp->lock);
        if (esp->issue_SC && !esp->current_SC)
                esp_exec_cmd(esp);
 
@@ -4344,7 +4321,7 @@ static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
        struct esp *esp = dev_id;
        unsigned long flags;
 
-       spin_lock_irqsave(&esp->lock, flags);
+       spin_lock_irqsave(&esp->ehost->host_lock, flags);
        if (ESP_IRQ_P(esp->dregs)) {
                ESP_INTSOFF(esp->dregs);
 
@@ -4354,7 +4331,7 @@ static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
 
                ESP_INTSON(esp->dregs);
        }
-       spin_unlock_irqrestore(&esp->lock, flags);
+       spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 }
 
 int esp_revoke(Scsi_Device* SDptr)
index 0cc5e37532dbe4e678f770553740cdf932919544..70f1a7c6eae5aa03cea0ba4ad523effe910317c9 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: esp.h,v 1.28 2000/03/30 01:33:17 davem Exp $
+/* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $
  * esp.h:  Defines and structures for the Sparc ESP (Enhanced SCSI
  *         Processor) driver under Linux.
  *
@@ -64,7 +64,6 @@ enum esp_rev {
 
 /* We get one of these for each ESP probed. */
 struct esp {
-       spinlock_t              lock;
        unsigned long           eregs;          /* ESP controller registers */
        unsigned long           dregs;          /* DMA controller registers */
        struct sbus_dma         *dma;           /* DMA controller sw state */
@@ -416,6 +415,7 @@ extern int esp_revoke(Scsi_Device* SDptr);
                sg_tablesize:   SG_ALL,                         \
                cmd_per_lun:    1,                              \
                use_clustering: ENABLE_CLUSTERING,              \
+               highmem_io:     1,                              \
 }
 
 /* For our interrupt engine. */
index a33868d948b725aab1cb7a8a16b5ea31cdd0801a..cf32a8a3acbdda35543a12d5ebcd98533be6166d 100644 (file)
@@ -130,7 +130,8 @@ scsi_unregister(struct Scsi_Host * sh){
  * pain to reverse this, so we try to avoid it 
  */
 extern int blk_nohighio;
-struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j)
+{
     struct Scsi_Host * retval, *shpnt, *o_shp;
     Scsi_Host_Name *shn, *shn2;
     int flag_new = 1;
index 08f3ea2805b8e5cee380d2caa4d97c2afbdac2fb..9045cc4cbb14c5214c0c9bc42717b6af3f86554d 100644 (file)
@@ -334,7 +334,6 @@ struct Scsi_Host
     int resetting; /* if set, it means that last_reset is a valid value */
     unsigned long last_reset;
 
-
     /*
      * These three parameters can be used to allow for wide scsi,
      * and for host adapters that support multiple busses
index 263e1827d16541f53166e5f03b1c8e8a0b692056..ce2e2e09021252fa013a57071d0688c7498d1af8 100644 (file)
@@ -22,7 +22,7 @@
 **  This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
 **  and is currently maintained by
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
@@ -63,7 +63,7 @@
 **  August 18 1997 by Cort <cort@cs.nmt.edu>:
 **     Support for Power/PC (Big Endian).
 **
-**  June 20 1998 by Gerard Roudier <groudier@club-internet.fr>:
+**  June 20 1998 by Gerard Roudier
 **     Support for up to 64 tags per lun.
 **     O(1) everywhere (C and SCRIPTS) for normal cases.
 **     Low PCI traffic for command handling when on-chip RAM is present.
@@ -8127,10 +8127,14 @@ static  int     ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
                        segment = 1;
                }
        }
-       else if (use_sg <= MAX_SCATTER) {
+       else {
                struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
 
                use_sg = map_scsi_sg_data(np, cmd);
+               if (use_sg > MAX_SCATTER) {
+                       unmap_scsi_data(np, cmd);
+                       return -1;
+               }
                data = &data[MAX_SCATTER - use_sg];
 
                while (segment < use_sg) {
@@ -8143,9 +8147,6 @@ static    int     ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
                        ++segment;
                }
        }
-       else {
-               return -1;
-       }
 
        return segment;
 }
index ac3f3b9e85da7b88cdf18c7e9379f4dcc5e5d4d1..ac4e795a14034e410a4e814e1eee8067cb11c499 100644 (file)
@@ -22,7 +22,7 @@
 **  This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
 **  and is currently maintained by
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
index efe6d609db97b0ee87bd10d42cafc176d3041b45..0a6795243bf26e2af3c5d2ace99781eaf73985e1 100644 (file)
@@ -1375,7 +1375,7 @@ static void redo_port_db(unsigned long arg)
        hostdata->explore_timer.data = 0;
        del_timer(&hostdata->explore_timer);
 
-       spin_lock_irqsave(&io_request_lock, flags);
+       spin_lock_irqsave(&host->host_lock, flags);
 
        if (hostdata->adapter_state & AS_REDO_FABRIC_PORTDB || hostdata->adapter_state & AS_REDO_LOOP_PORTDB) {
                isp2x00_make_portdb(host);
@@ -1422,7 +1422,7 @@ static void redo_port_db(unsigned long arg)
                hostdata->adapter_state = AS_LOOP_GOOD;
        }
 
-       spin_unlock_irqrestore(&io_request_lock, flags);
+       spin_unlock_irqrestore(&host->host_lock, flags);
 
 }
 
@@ -1430,11 +1430,12 @@ static void redo_port_db(unsigned long arg)
 
 void do_isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 {
+       struct Scsi_Host *host = dev_id;
        unsigned long flags;
 
-       spin_lock_irqsave(&io_request_lock, flags);
+       spin_lock_irqsave(&host->host_lock, flags);
        isp2x00_intr_handler(irq, dev_id, regs);
-       spin_unlock_irqrestore(&io_request_lock, flags);
+       spin_unlock_irqrestore(&host->host_lock, flags);
 }
 
 void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
index 6677a0f3e2e25fdc1d584c64efde48ae18edb7ab..bc761211bd7943f620379af1360273109ee16ff1 100644 (file)
@@ -970,11 +970,12 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
 
 void do_isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 {
+       struct Scsi_Host *host = dev_id;
        unsigned long flags;
 
-       spin_lock_irqsave(&io_request_lock, flags);
+       spin_lock_irqsave(&host->host_lock, flags);
        isp1020_intr_handler(irq, dev_id, regs);
-       spin_unlock_irqrestore(&io_request_lock, flags);
+       spin_unlock_irqrestore(&host->host_lock, flags);
 }
 
 void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
index 8561843c022b9213d3bfad8ce5c0b7cb7ce7623a..1b8acd2faa702094a0215c92e3bd4a1efba98896 100644 (file)
@@ -1445,7 +1445,7 @@ static void qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
        spin_unlock(&qpti->lock);
 
        if (dq != NULL) {
-               spin_lock(&io_request_lock);
+               spin_lock(&qpti->qhost->host_lock);
                do {
                        Scsi_Cmnd *next;
 
@@ -1453,7 +1453,7 @@ static void qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
                        dq->scsi_done(dq);
                        dq = next;
                } while (dq != NULL);
-               spin_unlock(&io_request_lock);
+               spin_unlock(&qpti->qhost->host_lock);
        }
        __restore_flags(flags);
 }
index aad93471c7af4f48b3ea318d6cad03c219ea96a1..6c49ea1df1c212a1153e3cd0c5202775751605ef 100644 (file)
@@ -524,6 +524,7 @@ struct qlogicpti {
        sg_tablesize:   QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), \
        cmd_per_lun:    1,                                         \
        use_clustering: ENABLE_CLUSTERING,                         \
+       highmem_io:     1,                                         \
 }
 
 /* For our interrupt engine. */
index 656766c09f2d3340f34280d4cdc4843f9aa77b46..98a478083b0cbc2d872561f9b62c952bf55140e8 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/completion.h>
+#include <linux/mempool.h>
 
 #define __KERNEL_SYSCALLS__
 
@@ -83,6 +84,18 @@ static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
 static void scsi_dump_status(int level);
 #endif
 
+#define SG_MEMPOOL_NR          5
+#define SG_MEMPOOL_SIZE                32
+
+struct scsi_host_sg_pool {
+       int size;
+       kmem_cache_t *slab;
+       mempool_t *pool;
+};
+
+static const int scsi_host_sg_pool_sizes[SG_MEMPOOL_NR] = { 8, 16, 32, 64, MAX_PHYS_SEGMENTS };
+struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR];
+
 /*
    static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
  */
@@ -181,23 +194,22 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
 void  scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
 {
        request_queue_t *q = &SDpnt->request_queue;
-       int max_segments = SHpnt->sg_tablesize;
 
        blk_init_queue(q, scsi_request_fn, &SHpnt->host_lock);
        q->queuedata = (void *) SDpnt;
 
-#ifdef DMA_CHUNK_SIZE
-       if (max_segments > 64)
-               max_segments = 64;
-#endif
+       /* Hardware imposed limit. */
+       blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
+
+       /*
+        * When we remove scsi_malloc soonish, this can die too
+        */
+       blk_queue_max_phys_segments(q, PAGE_SIZE / sizeof(struct scatterlist));
 
-       blk_queue_max_segments(q, max_segments);
        blk_queue_max_sectors(q, SHpnt->max_sectors);
 
        if (!SHpnt->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-       if (SHpnt->unchecked_isa_dma)
-               blk_queue_segment_boundary(q, ISA_DMA_THRESHOLD);
 }
 
 #ifdef MODULE
@@ -1955,13 +1967,6 @@ static int scsi_register_host(Scsi_Host_Template * tpnt)
                                }
                }
 
-               /*
-                * Now that we have all of the devices, resize the DMA pool,
-                * as required.  */
-               if (!out_of_space)
-                       scsi_resize_dma_pool();
-
-
                /* This does any final handling that is required. */
                for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
                        if (sdtpnt->finish && sdtpnt->nr_dev) {
@@ -2160,14 +2165,6 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
                tpnt->present--;
        }
 
-       /*
-        * If there are absolutely no more hosts left, it is safe
-        * to completely nuke the DMA pool.  The resize operation will
-        * do the right thing and free everything.
-        */
-       if (!scsi_hosts)
-               scsi_resize_dma_pool();
-
        if (pcount0 != next_scsi_host)
                printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
                       (next_scsi_host == 1) ? "" : "s");
@@ -2268,8 +2265,6 @@ static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
         */
        if (tpnt->finish && tpnt->nr_dev)
                (*tpnt->finish) ();
-       if (!out_of_space)
-               scsi_resize_dma_pool();
        MOD_INC_USE_COUNT;
 
        if (out_of_space) {
@@ -2535,16 +2530,81 @@ int __init scsi_setup(char *str)
 __setup("scsihosts=", scsi_setup);
 #endif
 
+static void *scsi_pool_alloc(int gfp_mask, void *data)
+{
+       return kmem_cache_alloc(data, gfp_mask);
+}
+
+static void scsi_pool_free(void *ptr, void *data)
+{
+       kmem_cache_free(data, ptr);
+}
+
+struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask)
+{
+       struct scsi_host_sg_pool *sgp;
+       struct scatterlist *sgl;
+
+       BUG_ON(!SCpnt->use_sg);
+
+       switch (SCpnt->use_sg) {
+               case 1 ... 8                    : SCpnt->sglist_len = 0; break;
+               case 9 ... 16                   : SCpnt->sglist_len = 1; break;
+               case 17 ... 32                  : SCpnt->sglist_len = 2; break;
+               case 33 ... 64                  : SCpnt->sglist_len = 3; break;
+               case 65 ... MAX_PHYS_SEGMENTS   : SCpnt->sglist_len = 4; break;
+               default: return NULL;
+       }
+
+       sgp = scsi_sg_pools + SCpnt->sglist_len;
+
+       sgl = mempool_alloc(sgp->pool, gfp_mask);
+       if (sgl) {
+               memset(sgl, 0, sgp->size);
+               return sgl;
+       }
+
+       return sgl;
+}
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
+{
+       struct scsi_host_sg_pool *sgp = scsi_sg_pools + index;
+
+       if (unlikely(index > SG_MEMPOOL_NR)) {
+               printk("scsi_free_sgtable: mempool %d\n", index);
+               BUG();
+       }
+
+       mempool_free(sgl, sgp->pool);
+}
+
 static int __init init_scsi(void)
 {
        struct proc_dir_entry *generic;
+       char name[16];
+       int i;
 
        printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
 
-        if( scsi_init_minimal_dma_pool() != 0 )
-        {
-                return 1;
-        }
+       /*
+        * setup sg memory pools
+        */
+       for (i = 0; i < SG_MEMPOOL_NR; i++) {
+               struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+               int size = scsi_host_sg_pool_sizes[i] * sizeof(struct scatterlist);
+
+               snprintf(name, sizeof(name) - 1, "sgpool-%d", scsi_host_sg_pool_sizes[i]);
+               sgp->slab = kmem_cache_create(name, size, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+               if (!sgp->slab)
+                       panic("SCSI: can't init sg slab\n");
+
+               sgp->pool = mempool_create(SG_MEMPOOL_SIZE, scsi_pool_alloc, scsi_pool_free, sgp->slab);
+               if (!sgp->pool)
+                       panic("SCSI: can't init sg mempool\n");
+
+               sgp->size = size;
+       }
 
        /*
         * This makes /proc/scsi and /proc/scsi/scsi visible.
@@ -2580,6 +2640,7 @@ static int __init init_scsi(void)
 static void __exit exit_scsi(void)
 {
        Scsi_Host_Name *shn, *shn2 = NULL;
+       int i;
 
        remove_bh(SCSI_BH);
 
@@ -2600,11 +2661,13 @@ static void __exit exit_scsi(void)
        remove_proc_entry ("scsi", 0);
 #endif
        
-       /*
-        * Free up the DMA pool.
-        */
-       scsi_resize_dma_pool();
-
+       for (i = 0; i < SG_MEMPOOL_NR; i++) {
+               struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+               mempool_destroy(sgp->pool);
+               kmem_cache_destroy(sgp->slab);
+               sgp->pool = NULL;
+               sgp->slab = NULL;
+       }
 }
 
 module_init(init_scsi);
index 3e6b1c3b34dc549be7b52a3dca6e3b352f26f46b..b8ad3f4aa887c6d003fe22f45b58e6015acabf7c 100644 (file)
@@ -438,6 +438,12 @@ extern int  scsi_partsize(struct buffer_head *bh, unsigned long capacity,
                     unsigned int *cyls, unsigned int *hds,
                     unsigned int *secs);
 
+/*
+ * sg list allocations
+ */
+struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask);
+void scsi_free_sgtable(struct scatterlist *sgl, int index);
+
 /*
  * Prototypes for functions in scsi_dma.c
  */
@@ -449,8 +455,8 @@ int scsi_free(void *, unsigned int);
 /*
  * Prototypes for functions in scsi_merge.c
  */
-extern void recount_segments(Scsi_Cmnd * SCpnt);
-extern void initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_initialize_merge_fn(Scsi_Device *SDpnt);
+extern int scsi_init_io(Scsi_Cmnd *SCpnt);
 
 /*
  * Prototypes for functions in scsi_queue.c
@@ -555,8 +561,6 @@ struct scsi_device {
        request_queue_t request_queue;
         atomic_t                device_active; /* commands checked out for device */
        volatile unsigned short device_busy;    /* commands actually active on low-level */
-       int (*scsi_init_io_fn) (Scsi_Cmnd *);   /* Used to initialize
-                                                  new request */
        Scsi_Cmnd *device_queue;        /* queue of SCSI Command structures */
 
 /* public: */
index 9b947ee8257f943854f3c6a8c454ed6ce4efff6a..fda3c65283354b10626593b1e844a49172a7f018 100644 (file)
@@ -182,7 +182,6 @@ static void scsi_dump(Scsi_Cmnd * SCpnt, int flag)
        };
        printk("\n");
 #endif
-       printk("DMA free %d sectors.\n", scsi_dma_free_sectors);
 }
 
 int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
@@ -653,7 +652,6 @@ int scsi_debug_abort(Scsi_Cmnd * SCpnt)
 
 int scsi_debug_biosparam(Disk * disk, kdev_t dev, int *info)
 {
-       int size = disk->capacity;
        info[0] = N_HEAD;
        info[1] = N_SECTOR;
        info[2] = N_CYLINDER;
diff --git a/drivers/scsi/scsi_dma.c b/drivers/scsi/scsi_dma.c
deleted file mode 100644 (file)
index 3de8351..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- *  scsi_dma.c Copyright (C) 2000 Eric Youngdale
- *
- *  mid-level SCSI DMA bounce buffer allocator
- *
- */
-
-#define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/blk.h>
-
-
-#include "scsi.h"
-#include "hosts.h"
-#include "constants.h"
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-/*
- * PAGE_SIZE must be a multiple of the sector size (512).  True
- * for all reasonably recent architectures (even the VAX...).
- */
-#define SECTOR_SIZE            512
-#define SECTORS_PER_PAGE       (PAGE_SIZE/SECTOR_SIZE)
-
-#if SECTORS_PER_PAGE <= 8
-typedef unsigned char FreeSectorBitmap;
-#elif SECTORS_PER_PAGE <= 32
-typedef unsigned int FreeSectorBitmap;
-#else
-#error You lose.
-#endif
-
-/*
- * Used for access to internal allocator used for DMA safe buffers.
- */
-static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
-
-static FreeSectorBitmap *dma_malloc_freelist = NULL;
-static int need_isa_bounce_buffers;
-static unsigned int dma_sectors = 0;
-unsigned int scsi_dma_free_sectors = 0;
-unsigned int scsi_need_isa_buffer = 0;
-static unsigned char **dma_malloc_pages = NULL;
-
-/*
- * Function:    scsi_malloc
- *
- * Purpose:     Allocate memory from the DMA-safe pool.
- *
- * Arguments:   len       - amount of memory we need.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Pointer to memory block.
- *
- * Notes:       Prior to the new queue code, this function was not SMP-safe.
- *              This function can only allocate in units of sectors
- *              (i.e. 512 bytes).
- *
- *              We cannot use the normal system allocator becuase we need
- *              to be able to guarantee that we can process a complete disk
- *              I/O request without touching the system allocator.  Think
- *              about it - if the system were heavily swapping, and tried to
- *              write out a block of memory to disk, and the SCSI code needed
- *              to allocate more memory in order to be able to write the
- *              data to disk, you would wedge the system.
- */
-void *scsi_malloc(unsigned int len)
-{
-       unsigned int nbits, mask;
-       unsigned long flags;
-
-       int i, j;
-       if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
-               return NULL;
-
-       nbits = len >> 9;
-       mask = (1 << nbits) - 1;
-
-       spin_lock_irqsave(&allocator_request_lock, flags);
-
-       for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
-               for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
-                       if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
-                               dma_malloc_freelist[i] |= (mask << j);
-                               scsi_dma_free_sectors -= nbits;
-#ifdef DEBUG
-                               SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
-                               printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
-#endif
-                               spin_unlock_irqrestore(&allocator_request_lock, flags);
-                               return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
-                       }
-               }
-       spin_unlock_irqrestore(&allocator_request_lock, flags);
-       return NULL;            /* Nope.  No more */
-}
-
-/*
- * Function:    scsi_free
- *
- * Purpose:     Free memory into the DMA-safe pool.
- *
- * Arguments:   ptr       - data block we are freeing.
- *              len       - size of block we are freeing.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       This function *must* only be used to free memory
- *              allocated from scsi_malloc().
- *
- *              Prior to the new queue code, this function was not SMP-safe.
- *              This function can only allocate in units of sectors
- *              (i.e. 512 bytes).
- */
-int scsi_free(void *obj, unsigned int len)
-{
-       unsigned int page, sector, nbits, mask;
-       unsigned long flags;
-
-#ifdef DEBUG
-       unsigned long ret = 0;
-
-#ifdef __mips__
-       __asm__ __volatile__("move\t%0,$31":"=r"(ret));
-#else
-       ret = __builtin_return_address(0);
-#endif
-       printk("scsi_free %p %d\n", obj, len);
-       SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
-#endif
-
-       spin_lock_irqsave(&allocator_request_lock, flags);
-
-       for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
-               unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
-               if ((unsigned long) obj >= page_addr &&
-                   (unsigned long) obj < page_addr + PAGE_SIZE) {
-                       sector = (((unsigned long) obj) - page_addr) >> 9;
-
-                       nbits = len >> 9;
-                       mask = (1 << nbits) - 1;
-
-                       if (sector + nbits > SECTORS_PER_PAGE)
-                               panic("scsi_free:Bad memory alignment");
-
-                       if ((dma_malloc_freelist[page] &
-                            (mask << sector)) != (mask << sector)) {
-#ifdef DEBUG
-                               printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
-                                      obj, len, ret);
-#endif
-                               panic("scsi_free:Trying to free unused memory");
-                       }
-                       scsi_dma_free_sectors += nbits;
-                       dma_malloc_freelist[page] &= ~(mask << sector);
-                       spin_unlock_irqrestore(&allocator_request_lock, flags);
-                       return 0;
-               }
-       }
-       panic("scsi_free:Bad offset");
-}
-
-
-/*
- * Function:    scsi_resize_dma_pool
- *
- * Purpose:     Ensure that the DMA pool is sufficiently large to be
- *              able to guarantee that we can always process I/O requests
- *              without calling the system allocator.
- *
- * Arguments:   None.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       Prior to the new queue code, this function was not SMP-safe.
- *              Go through the device list and recompute the most appropriate
- *              size for the dma pool.  Then grab more memory (as required).
- */
-void scsi_resize_dma_pool(void)
-{
-       int i, k;
-       unsigned long size;
-       unsigned long flags;
-       struct Scsi_Host *shpnt;
-       struct Scsi_Host *host = NULL;
-       Scsi_Device *SDpnt;
-       FreeSectorBitmap *new_dma_malloc_freelist = NULL;
-       unsigned int new_dma_sectors = 0;
-       unsigned int new_need_isa_buffer = 0;
-       unsigned char **new_dma_malloc_pages = NULL;
-       int out_of_space = 0;
-
-       spin_lock_irqsave(&allocator_request_lock, flags);
-
-       if (!scsi_hostlist) {
-               /*
-                * Free up the DMA pool.
-                */
-               if (scsi_dma_free_sectors != dma_sectors)
-                       panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
-
-               for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
-                       free_pages((unsigned long) dma_malloc_pages[i], 0);
-               if (dma_malloc_pages)
-                       kfree((char *) dma_malloc_pages);
-               dma_malloc_pages = NULL;
-               if (dma_malloc_freelist)
-                       kfree((char *) dma_malloc_freelist);
-               dma_malloc_freelist = NULL;
-               dma_sectors = 0;
-               scsi_dma_free_sectors = 0;
-               spin_unlock_irqrestore(&allocator_request_lock, flags);
-               return;
-       }
-       /* Next, check to see if we need to extend the DMA buffer pool */
-
-       new_dma_sectors = 2 * SECTORS_PER_PAGE;         /* Base value we use */
-
-       if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
-               need_isa_bounce_buffers = 1;
-       else
-               need_isa_bounce_buffers = 0;
-
-       if (scsi_devicelist)
-               for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
-                       new_dma_sectors += SECTORS_PER_PAGE;    /* Increment for each host */
-
-       for (host = scsi_hostlist; host; host = host->next) {
-               for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
-                       /*
-                        * sd and sr drivers allocate scatterlists.
-                        * sr drivers may allocate for each command 1x2048 or 2x1024 extra
-                        * buffers for 2k sector size and 1k fs.
-                        * sg driver allocates buffers < 4k.
-                        * st driver does not need buffers from the dma pool.
-                        * estimate 4k buffer/command for devices of unknown type (should panic).
-                        */
-                       if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
-                           SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
-                               int nents = host->sg_tablesize;
-#ifdef DMA_CHUNK_SIZE
-                               /* If the architecture does DMA sg merging, make sure
-                                  we count with at least 64 entries even for HBAs
-                                  which handle very few sg entries.  */
-                               if (nents < 64) nents = 64;
-#endif
-                               new_dma_sectors += ((nents *
-                               sizeof(struct scatterlist) + 511) >> 9) *
-                                SDpnt->queue_depth;
-                               if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
-                                       new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
-                       } else if (SDpnt->type == TYPE_SCANNER ||
-                                  SDpnt->type == TYPE_PROCESSOR ||
-                                  SDpnt->type == TYPE_COMM ||
-                                  SDpnt->type == TYPE_MEDIUM_CHANGER ||
-                                  SDpnt->type == TYPE_ENCLOSURE) {
-                               new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
-                       } else {
-                               if (SDpnt->type != TYPE_TAPE) {
-                                       printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
-                                       new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
-                               }
-                       }
-
-                       if (host->unchecked_isa_dma &&
-                           need_isa_bounce_buffers &&
-                           SDpnt->type != TYPE_TAPE) {
-                               new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
-                                   SDpnt->queue_depth;
-                               new_need_isa_buffer++;
-                       }
-               }
-       }
-
-#ifdef DEBUG_INIT
-       printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
-#endif
-
-       /* limit DMA memory to 32MB: */
-       new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-
-       /*
-        * We never shrink the buffers - this leads to
-        * race conditions that I would rather not even think
-        * about right now.
-        */
-#if 0                          /* Why do this? No gain and risks out_of_space */
-       if (new_dma_sectors < dma_sectors)
-               new_dma_sectors = dma_sectors;
-#endif
-       if (new_dma_sectors <= dma_sectors) {
-               spin_unlock_irqrestore(&allocator_request_lock, flags);
-               return;         /* best to quit while we are in front */
-        }
-
-       for (k = 0; k < 20; ++k) {      /* just in case */
-               out_of_space = 0;
-               size = (new_dma_sectors / SECTORS_PER_PAGE) *
-                   sizeof(FreeSectorBitmap);
-               new_dma_malloc_freelist = (FreeSectorBitmap *)
-                   kmalloc(size, GFP_ATOMIC);
-               if (new_dma_malloc_freelist) {
-                        memset(new_dma_malloc_freelist, 0, size);
-                       size = (new_dma_sectors / SECTORS_PER_PAGE) *
-                           sizeof(*new_dma_malloc_pages);
-                       new_dma_malloc_pages = (unsigned char **)
-                           kmalloc(size, GFP_ATOMIC);
-                       if (!new_dma_malloc_pages) {
-                               size = (new_dma_sectors / SECTORS_PER_PAGE) *
-                                   sizeof(FreeSectorBitmap);
-                               kfree((char *) new_dma_malloc_freelist);
-                               out_of_space = 1;
-                       } else {
-                                memset(new_dma_malloc_pages, 0, size);
-                        }
-               } else
-                       out_of_space = 1;
-
-               if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
-                       for (i = dma_sectors / SECTORS_PER_PAGE;
-                          i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
-                               new_dma_malloc_pages[i] = (unsigned char *)
-                                   __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
-                               if (!new_dma_malloc_pages[i])
-                                       break;
-                       }
-                       if (i != new_dma_sectors / SECTORS_PER_PAGE) {  /* clean up */
-                               int k = i;
-
-                               out_of_space = 1;
-                               for (i = 0; i < k; ++i)
-                                       free_pages((unsigned long) new_dma_malloc_pages[i], 0);
-                       }
-               }
-               if (out_of_space) {     /* try scaling down new_dma_sectors request */
-                       printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
-                              "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
-                       if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
-                               break;  /* pretty well hopeless ... */
-                       new_dma_sectors = (new_dma_sectors * 3) / 4;
-                       new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-                       if (new_dma_sectors <= dma_sectors)
-                               break;  /* stick with what we have got */
-               } else
-                       break;  /* found space ... */
-       }                       /* end of for loop */
-       if (out_of_space) {
-               spin_unlock_irqrestore(&allocator_request_lock, flags);
-               scsi_need_isa_buffer = new_need_isa_buffer;     /* some useful info */
-               printk("      WARNING, not enough memory, pool not expanded\n");
-               return;
-       }
-       /* When we dick with the actual DMA list, we need to
-        * protect things
-        */
-       if (dma_malloc_freelist) {
-               size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
-               memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
-               kfree((char *) dma_malloc_freelist);
-       }
-       dma_malloc_freelist = new_dma_malloc_freelist;
-
-       if (dma_malloc_pages) {
-               size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
-               memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
-               kfree((char *) dma_malloc_pages);
-       }
-       scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
-       dma_malloc_pages = new_dma_malloc_pages;
-       dma_sectors = new_dma_sectors;
-       scsi_need_isa_buffer = new_need_isa_buffer;
-
-       spin_unlock_irqrestore(&allocator_request_lock, flags);
-
-#ifdef DEBUG_INIT
-       printk("resize_dma_pool: dma free sectors   = %d\n", scsi_dma_free_sectors);
-       printk("resize_dma_pool: dma sectors        = %d\n", dma_sectors);
-       printk("resize_dma_pool: need isa buffers   = %d\n", scsi_need_isa_buffer);
-#endif
-}
-
-/*
- * Function:    scsi_init_minimal_dma_pool
- *
- * Purpose:     Allocate a minimal (1-page) DMA pool.
- *
- * Arguments:   None.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       
- */
-int scsi_init_minimal_dma_pool(void)
-{
-       unsigned long size;
-       unsigned long flags;
-       int has_space = 0;
-
-       spin_lock_irqsave(&allocator_request_lock, flags);
-
-       dma_sectors = PAGE_SIZE / SECTOR_SIZE;
-       scsi_dma_free_sectors = dma_sectors;
-       /*
-        * Set up a minimal DMA buffer list - this will be used during scan_scsis
-        * in some cases.
-        */
-
-       /* One bit per sector to indicate free/busy */
-       size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
-       dma_malloc_freelist = (FreeSectorBitmap *)
-           kmalloc(size, GFP_ATOMIC);
-       if (dma_malloc_freelist) {
-                memset(dma_malloc_freelist, 0, size);
-               /* One pointer per page for the page list */
-               dma_malloc_pages = (unsigned char **) kmalloc(
-                        (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages),
-                                                            GFP_ATOMIC);
-               if (dma_malloc_pages) {
-                        memset(dma_malloc_pages, 0, size);
-                       dma_malloc_pages[0] = (unsigned char *)
-                           __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
-                       if (dma_malloc_pages[0])
-                               has_space = 1;
-               }
-       }
-       if (!has_space) {
-               if (dma_malloc_freelist) {
-                       kfree((char *) dma_malloc_freelist);
-                       if (dma_malloc_pages)
-                               kfree((char *) dma_malloc_pages);
-               }
-               spin_unlock_irqrestore(&allocator_request_lock, flags);
-               printk("scsi::init_module: failed, out of memory\n");
-               return 1;
-       }
-
-       spin_unlock_irqrestore(&allocator_request_lock, flags);
-       return 0;
-}
index dc4681cd4d7c0058fd5b7cf04605f14f38cda816..f64d20090923203d35a12d763405e89670cf642c 100644 (file)
@@ -78,8 +78,7 @@ static int ioctl_probe(struct Scsi_Host *host, void *buffer)
  * *(char *) ((int *) arg)[2] the actual command byte.   
  * 
  * Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL.  MAX_BUF can be increased in
- * the future by increasing the size that scsi_malloc will accept.
+ * the ioctl will fail with error EINVAL.
  * 
  * This size *does not* include the initial lengths that were passed.
  * 
@@ -197,10 +196,14 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
        unsigned int inlen, outlen, cmdlen;
        unsigned int needed, buf_needed;
        int timeout, retries, result;
-       int data_direction;
+       int data_direction, gfp_mask = GFP_KERNEL;
 
        if (!sic)
                return -EINVAL;
+
+       if (dev->host->unchecked_isa_dma)
+               gfp_mask |= GFP_DMA;
+
        /*
         * Verify that we can read at least this much.
         */
@@ -232,7 +235,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
                buf_needed = (buf_needed + 511) & ~511;
                if (buf_needed > MAX_BUF)
                        buf_needed = MAX_BUF;
-               buf = (char *) scsi_malloc(buf_needed);
+               buf = (char *) kmalloc(buf_needed, gfp_mask);
                if (!buf)
                        return -ENOMEM;
                memset(buf, 0, buf_needed);
@@ -341,7 +344,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
 
 error:
        if (buf)
-               scsi_free(buf, buf_needed);
+               kfree(buf);
 
 
        return result;
index d7cc000bcdd2acf4e5d0100043d602fd40aa3e3f..317f21858c1fade858282eab53a9c8d94c65c753 100644 (file)
@@ -82,7 +82,7 @@ static void __scsi_insert_special(request_queue_t *q, struct request *rq,
        rq->special = data;
        rq->q = NULL;
        rq->bio = rq->biotail = NULL;
-       rq->nr_segments = 0;
+       rq->nr_phys_segments = 0;
        rq->elevator_sequence = 0;
 
        /*
@@ -461,13 +461,13 @@ static void scsi_release_buffers(Scsi_Cmnd * SCpnt)
                if (bbpnt) {
                        for (i = 0; i < SCpnt->use_sg; i++) {
                                if (bbpnt[i])
-                                       scsi_free(sgpnt[i].address, sgpnt[i].length);
+                                       kfree(sgpnt[i].address);
                        }
                }
-               scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
+               scsi_free_sgtable(SCpnt->request_buffer, SCpnt->sglist_len);
        } else {
                if (SCpnt->request_buffer != req->buffer)
-                       scsi_free(SCpnt->request_buffer,SCpnt->request_bufflen);
+                       kfree(SCpnt->request_buffer);
        }
 
        /*
@@ -541,11 +541,11 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
                                                       sgpnt[i].address,
                                                       sgpnt[i].length);
                                        }
-                                       scsi_free(sgpnt[i].address, sgpnt[i].length);
+                                       kfree(sgpnt[i].address);
                                }
                        }
                }
-               scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+               scsi_free_sgtable(SCpnt->buffer, SCpnt->sglist_len);
        } else {
                if (SCpnt->buffer != req->buffer) {
                        if (rq_data_dir(req) == READ) {
@@ -555,7 +555,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
                                memcpy(to, SCpnt->buffer, SCpnt->bufflen);
                                bio_kunmap_irq(to, &flags);
                        }
-                       scsi_free(SCpnt->buffer, SCpnt->bufflen);
+                       kfree(SCpnt->buffer);
                }
        }
 
@@ -922,15 +922,6 @@ void scsi_request_fn(request_queue_t * q)
                         */
                        if (req->special) {
                                SCpnt = (Scsi_Cmnd *) req->special;
-                               /*
-                                * We need to recount the number of
-                                * scatter-gather segments here - the
-                                * normal case code assumes this to be
-                                * correct, as it would be a performance
-                                * loss to always recount.  Handling
-                                * errors is always unusual, of course.
-                                */
-                               recount_segments(SCpnt);
                        } else {
                                SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
                        }
@@ -1003,7 +994,7 @@ void scsi_request_fn(request_queue_t * q)
                         * required).  Hosts that need bounce buffers will also
                         * get those allocated here.  
                         */
-                       if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+                       if (!scsi_init_io(SCpnt)) {
                                SCpnt = __scsi_end_request(SCpnt, 0, 
                                                           SCpnt->request.nr_sectors, 0, 0);
                                if( SCpnt != NULL )
index 89def7c84d79a9b4057d5ce045052adde3a61147..72ac525dbf30d2765ec47c0c4c9cf9590b7793e2 100644 (file)
 
 /*
  * This file contains queue management functions that are used by SCSI.
- * Typically this is used for several purposes.   First, we need to ensure
- * that commands do not grow so large that they cannot be handled all at
- * once by a host adapter.   The various flavors of merge functions included
- * here serve this purpose.
- *
- * Note that it would be quite trivial to allow the low-level driver the
- * flexibility to define it's own queue handling functions.  For the time
- * being, the hooks are not present.   Right now we are just using the
- * data in the host template as an indicator of how we should be handling
- * queues, and we select routines that are optimized for that purpose.
- *
- * Some hosts do not impose any restrictions on the size of a request.
- * In such cases none of the merge functions in this file are called,
- * and we allow ll_rw_blk to merge requests in the default manner.
- * This isn't guaranteed to be optimal, but it should be pretty darned
- * good.   If someone comes up with ideas of better ways of managing queues
- * to improve on the default behavior, then certainly fit it into this
- * scheme in whatever manner makes the most sense.   Please note that
- * since each device has it's own queue, we have considerable flexibility
- * in queue management.
+ * We need to ensure that commands do not grow so large that they cannot
+ * be handled all at once by a host adapter.
  */
 
 #define __NO_VERSION__
 #include <scsi/scsi_ioctl.h>
 
 /*
- * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
- * Ultimately we should get away from using a dedicated DMA bounce buffer
- * pool, and we should instead try and use kmalloc() instead.  If we can
- * eliminate this pool, then this restriction would no longer be needed.
- */
-#define DMA_SEGMENT_SIZE_LIMITED
-
-static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
-{
-       int jj;
-       struct scatterlist *sgpnt;
-       void **bbpnt;
-       int consumed = 0;
-
-       sgpnt = (struct scatterlist *) SCpnt->request_buffer;
-       bbpnt = SCpnt->bounce_buffers;
-
-       /*
-        * Now print out a bunch of stats.  First, start with the request
-        * size.
-        */
-       printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
-       printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
-       printk("request_bufflen:%d\n", SCpnt->request_bufflen);
-       /*
-        * Now dump the scatter-gather table, up to the point of failure.
-        */
-       for(jj=0; jj < SCpnt->use_sg; jj++)
-       {
-               printk("[%d]\tlen:%d\taddr:%p\tbounce:%p\n",
-                      jj,
-                      sgpnt[jj].length,
-                      sgpnt[jj].address,
-                      (bbpnt ? bbpnt[jj] : NULL));
-               if (bbpnt && bbpnt[jj])
-                       consumed += sgpnt[jj].length;
-       }
-       printk("Total %d sectors consumed\n", consumed);
-       panic("DMA pool exhausted");
-}
-
-/*
- * This entire source file deals with the new queueing code.
- */
-
-/*
- * Function:    __count_segments()
- *
- * Purpose:     Prototype for queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
- *                      expose all of the address lines, so that DMA cannot
- *                      be done from an arbitrary address).
- *             remainder - used to track the residual size of the last
- *                     segment.  Comes in handy when we want to limit the 
- *                     size of bounce buffer segments to PAGE_SIZE.
- *
- * Returns:     Count of the number of SG segments for the request.
- *
- * Lock status: 
- *
- * Notes:       This is only used for diagnostic purposes.
- */
-__inline static int __count_segments(struct request *req,
-                                    int dma_host,
-                                    int * remainder)
-{
-       int ret = 1;
-       int reqsize = 0;
-       int i;
-       struct bio *bio;
-       struct bio_vec *bvec;
-
-       if (remainder)
-               reqsize = *remainder;
-
-       /*
-        * Add in the size increment for the first buffer.
-        */
-       bio = req->bio;
-#ifdef DMA_SEGMENT_SIZE_LIMITED
-       if (reqsize + bio->bi_size > PAGE_SIZE)
-               ret++;
-#endif
-
-       rq_for_each_bio(bio, req) {
-               bio_for_each_segment(bvec, bio, i)
-                       ret++;
-
-               reqsize += bio->bi_size;
-       }
-
-       if (remainder)
-               *remainder = reqsize;
-
-       return ret;
-}
-
-/*
- * Function:    recount_segments()
- *
- * Purpose:     Recount the number of scatter-gather segments for this request.
- *
- * Arguments:   req     - request that needs recounting.
- *
- * Returns:     Count of the number of SG segments for the request.
- *
- * Lock status: Irrelevant.
- *
- * Notes:      This is only used when we have partially completed requests
- *             and the bit that is leftover is of an indeterminate size.
- *             This can come up if you get a MEDIUM_ERROR, for example,
- *             as we will have "completed" all of the sectors up to and
- *             including the bad sector, and the leftover bit is what
- *             we have to do now.  This tends to be a rare occurrence, so
- *             we aren't busting our butts to instantiate separate versions
- *             of this function for the 4 different flag values.  We
- *             probably should, however.
- */
-void
-recount_segments(Scsi_Cmnd * SCpnt)
-{
-       struct request *req = &SCpnt->request;
-       struct Scsi_Host *SHpnt = SCpnt->host;
-
-       req->nr_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL);
-}
-
-/*
- * IOMMU hackery for sparc64
- */
-#ifdef DMA_CHUNK_SIZE
-
-#define MERGEABLE_BUFFERS(X,Y) \
-       ((((bvec_to_phys(__BVEC_END((X))) + __BVEC_END((X))->bv_len) | bio_to_phys((Y))) & (DMA_CHUNK_SIZE - 1)) == 0)
-
-static inline int scsi_new_mergeable(request_queue_t * q,
-                                    struct request * req,
-                                    struct bio *bio)
-{
-       int nr_segs = bio_hw_segments(q, bio);
-
-       /*
-        * pci_map_sg will be able to merge these two
-        * into a single hardware sg entry, check if
-        * we'll have enough memory for the sg list.
-        * scsi.c allocates for this purpose
-        * min(64,sg_tablesize) entries.
-        */
-       if (req->nr_segments + nr_segs > q->max_segments)
-               return 0;
-
-       req->nr_segments += nr_segs;
-       return 1;
-}
-
-static inline int scsi_new_segment(request_queue_t * q,
-                                  struct request * req,
-                                  struct bio *bio)
-{
-       int nr_segs = bio_hw_segments(q, bio);
-       /*
-        * pci_map_sg won't be able to map these two
-        * into a single hardware sg entry, so we have to
-        * check if things fit into sg_tablesize.
-        */
-       if (req->nr_hw_segments + nr_segs > q->max_segments)
-               return 0;
-       else if (req->nr_segments + nr_segs > q->max_segments)
-               return 0;
-
-       req->nr_hw_segments += nr_segs;
-       req->nr_segments += nr_segs;
-       return 1;
-}
-
-#else /* DMA_CHUNK_SIZE */
-
-static inline int scsi_new_segment(request_queue_t * q,
-                                  struct request * req,
-                                  struct bio *bio)
-{
-       int nr_segs = bio_hw_segments(q, bio);
-
-       if (req->nr_segments + nr_segs > q->max_segments) {
-               req->flags |= REQ_NOMERGE;
-               return 0;
-       }
-
-       /*
-        * This will form the start of a new segment.  Bump the 
-        * counter.
-        */
-       req->nr_segments += nr_segs;
-       return 1;
-}
-#endif /* DMA_CHUNK_SIZE */
-
-/*
- * Function:    __scsi_merge_fn()
- *
- * Purpose:     Prototype for queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              bio     - Block which we may wish to merge into request
- *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
- *                      expose all of the address lines, so that DMA cannot
- *                      be done from an arbitrary address).
- *
- * Returns:     1 if it is OK to merge the block into the request.  0
- *              if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes:       Some drivers have limited scatter-gather table sizes, and
- *              thus they cannot queue an infinitely large command.  This
- *              function is called from ll_rw_blk before it attempts to merge
- *              a new block into a request to make sure that the request will
- *              not become too large.
- *
- *              This function is not designed to be directly called.  Instead
- *              it should be referenced from other functions where the
- *              dma_host parameter should be an integer constant. The
- *              compiler should thus be able to properly optimize the code,
- *              eliminating stuff that is irrelevant.
- *              It is more maintainable to do this way with a single function
- *              than to have 4 separate functions all doing roughly the
- *              same thing.
- */
-__inline static int __scsi_back_merge_fn(request_queue_t * q,
-                                        struct request *req,
-                                        struct bio *bio)
-{
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-               req->flags |= REQ_NOMERGE;
-               return 0;
-       }
-
-#ifdef DMA_CHUNK_SIZE
-       if (MERGEABLE_BUFFERS(req->biotail, bio))
-               return scsi_new_mergeable(q, req, bio);
-#endif
-
-       return scsi_new_segment(q, req, bio);
-}
-
-__inline static int __scsi_front_merge_fn(request_queue_t * q,
-                                         struct request *req,
-                                         struct bio *bio)
-{
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-               req->flags |= REQ_NOMERGE;
-               return 0;
-       }
-
-#ifdef DMA_CHUNK_SIZE
-       if (MERGEABLE_BUFFERS(bio, req->bio))
-               return scsi_new_mergeable(q, req, bio);
-#endif
-       return scsi_new_segment(q, req, bio);
-}
-
-/*
- * Function:    scsi_merge_fn_()
- *
- * Purpose:     queue merge function.
+ * Function:    scsi_init_io()
  *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              bio     - Block which we may wish to merge into request
- *
- * Returns:     1 if it is OK to merge the block into the request.  0
- *              if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes:       Optimized for different cases depending upon whether
- *              ISA DMA is in use and whether clustering should be used.
- */
-#define MERGEFCT(_FUNCTION, _BACK_FRONT)                               \
-static int _FUNCTION(request_queue_t * q,                              \
-                    struct request * req,                              \
-                    struct bio *bio)                                   \
-{                                                                      \
-    int ret;                                                           \
-    ret =  __scsi_ ## _BACK_FRONT ## _merge_fn(q,                      \
-                                              req,                     \
-                                              bio);                    \
-    return ret;                                                                \
-}
-
-MERGEFCT(scsi_back_merge_fn, back)
-MERGEFCT(scsi_front_merge_fn, front)
-
-/*
- * Function:    scsi_merge_requests_fn_()
- *
- * Purpose:     queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              next    - Block which we may wish to merge into request
- *
- * Returns:     1 if it is OK to merge the block into the request.  0
- *              if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- */
-inline static int scsi_merge_requests_fn(request_queue_t * q,
-                                        struct request *req,
-                                        struct request *next)
-{
-       int bio_segs;
-
-       /*
-        * First check if the either of the requests are re-queued
-        * requests.  Can't merge them if they are.
-        */
-       if (req->special || next->special)
-               return 0;
-
-       /*
-        * will become to large?
-        */
-       if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
-               return 0;
-
-       bio_segs = req->nr_segments + next->nr_segments;
-       if (blk_contig_segment(q, req->biotail, next->bio))
-               bio_segs--;
-
-       /*
-        * exceeds our max allowed segments?
-        */
-       if (bio_segs > q->max_segments)
-               return 0;
-
-#ifdef DMA_CHUNK_SIZE
-       bio_segs = req->nr_hw_segments + next->nr_hw_segments;
-       if (blk_contig_segment(q, req->biotail, next->bio))
-               bio_segs--;
-
-       /* If dynamic DMA mapping can merge last segment in req with
-        * first segment in next, then the check for hw segments was
-        * done above already, so we can always merge.
-        */
-       if (bio_segs > q->max_segments)
-               return 0;
-
-       req->nr_hw_segments = bio_segs;
-#endif
-
-       /*
-        * This will form the start of a new segment.  Bump the 
-        * counter.
-        */
-       req->nr_segments = bio_segs;
-       return 1;
-}
-
-/*
- * Function:    __init_io()
- *
- * Purpose:     Prototype for io initialize function.
+ * Purpose:     SCSI I/O initialize function.
  *
  * Arguments:   SCpnt   - Command descriptor we wish to initialize
- *              sg_count_valid  - 1 if the sg count in the req is valid.
- *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
- *                      expose all of the address lines, so that DMA cannot
- *                      be done from an arbitrary address).
  *
  * Returns:     1 on success.
  *
  * Lock status: 
- *
- * Notes:       Only the SCpnt argument should be a non-constant variable.
- *              This function is designed in such a way that it will be
- *              invoked from a series of small stubs, each of which would
- *              be optimized for specific circumstances.
- *
- *              The advantage of this is that hosts that don't do DMA
- *              get versions of the function that essentially don't have
- *              any of the DMA code.  Same goes for clustering - in the
- *              case of hosts with no need for clustering, there is no point
- *              in a whole bunch of overhead.
- *
- *              Finally, in the event that a host has set can_queue to SG_ALL
- *              implying that there is no limit to the length of a scatter
- *              gather list, the sg count in the request won't be valid
- *              (mainly because we don't need queue management functions
- *              which keep the tally uptodate.
  */
-__inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
+int scsi_init_io(Scsi_Cmnd *SCpnt)
 {
-       struct bio         * bio;
-       char               * buff;
-       int                  count;
-       int                  i;
-       struct request     * req;
-       int                  sectors;
-       struct scatterlist * sgpnt;
-       int                  this_count;
-       void               ** bbpnt;
+       struct request     *req;
+       struct scatterlist *sgpnt;
+       int count, gfp_mask;
 
        req = &SCpnt->request;
 
        /*
         * First we need to know how many scatter gather segments are needed.
         */
-       count = req->nr_segments;
-
-       /*
-        * If the dma pool is nearly empty, then queue a minimal request
-        * with a single segment.  Typically this will satisfy a single
-        * buffer.
-        */
-       if (dma_host && scsi_dma_free_sectors <= 10) {
-               this_count = req->current_nr_sectors;
-               goto single_segment;
-       }
+       count = req->nr_phys_segments;
 
        /*
         * we used to not use scatter-gather for single segment request,
@@ -497,50 +77,17 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
         */
        SCpnt->use_sg = count;
 
-       /* 
-        * Allocate the actual scatter-gather table itself.
-        */
-       SCpnt->sglist_len = (SCpnt->use_sg * sizeof(struct scatterlist));
+       gfp_mask = GFP_NOIO;
+       if (in_interrupt())
+               gfp_mask &= ~__GFP_WAIT;
 
-       /* If we could potentially require ISA bounce buffers, allocate
-        * space for this array here.
-        */
-       if (dma_host)
-               SCpnt->sglist_len += (SCpnt->use_sg * sizeof(void *));
+       sgpnt = scsi_alloc_sgtable(SCpnt, gfp_mask);
+       BUG_ON(!sgpnt);
 
-       /* scsi_malloc can only allocate in chunks of 512 bytes so
-        * round it up.
-        */
-       SCpnt->sglist_len = (SCpnt->sglist_len + 511) & ~511;
-       sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
-
-       if (!sgpnt) {
-               struct Scsi_Host *SHpnt = SCpnt->host;
-
-               /*
-                * If we cannot allocate the scatter-gather table, then
-                * simply write the first buffer all by itself.
-                */
-               printk("Warning - running *really* short on DMA buffers\n");
-               this_count = req->current_nr_sectors;
-               printk("SCSI: depth is %d, # segs %d, # hw segs %d\n", SHpnt->host_busy, req->nr_segments, req->nr_hw_segments);
-               goto single_segment;
-       }
-
-       memset(sgpnt, 0, SCpnt->sglist_len);
        SCpnt->request_buffer = (char *) sgpnt;
        SCpnt->request_bufflen = 0;
        req->buffer = NULL;
 
-       if (dma_host)
-               bbpnt = (void **) ((char *)sgpnt +
-                        (SCpnt->use_sg * sizeof(struct scatterlist)));
-       else
-               bbpnt = NULL;
-
-       SCpnt->bounce_buffers = bbpnt;
-
        /* 
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
@@ -549,183 +96,22 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
        count = blk_rq_map_sg(req->q, req, SCpnt->request_buffer);
 
        /*
-        * Verify that the count is correct.
+        * mapped well, send it off
         */
-       if (count > SCpnt->use_sg) {
-               printk("Incorrect number of segments after building list\n");
-               printk("counted %d, received %d\n", count, SCpnt->use_sg);
-               printk("req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, req->current_nr_sectors);
-               scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
-               this_count = req->current_nr_sectors;
-               goto single_segment;
-       }
-
-       SCpnt->use_sg = count;
-
-       if (!dma_host)
+       if (count <= SCpnt->use_sg) {
+               SCpnt->use_sg = count;
                return 1;
-
-       /*
-        * Now allocate bounce buffers, if needed.
-        */
-       SCpnt->request_bufflen = 0;
-       for (i = 0; i < count; i++) {
-               sectors = (sgpnt[i].length >> 9);
-               SCpnt->request_bufflen += sgpnt[i].length;
-               if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
-                    ISA_DMA_THRESHOLD) {
-                       if( scsi_dma_free_sectors - sectors <= 10  ) {
-                               /*
-                                * If this would nearly drain the DMA
-                                * pool empty, then let's stop here.
-                                * Don't make this request any larger.
-                                * This is kind of a safety valve that
-                                * we use - we could get screwed later
-                                * on if we run out completely.  
-                                */
-                               SCpnt->request_bufflen -= sgpnt[i].length;
-                               SCpnt->use_sg = i;
-                               if (i == 0) {
-                                       goto big_trouble;
-                               }
-                               break;
-                       }
-
-                       /*
-                        * this is not a dma host, so it will never
-                        * be a highmem page
-                        */
-                       bbpnt[i] = page_address(sgpnt[i].page) +sgpnt[i].offset;
-                       sgpnt[i].address = (char *)scsi_malloc(sgpnt[i].length);
-                       /*
-                        * If we cannot allocate memory for this DMA bounce
-                        * buffer, then queue just what we have done so far.
-                        */
-                       if (sgpnt[i].address == NULL) {
-                               printk("Warning - running low on DMA memory\n");
-                               SCpnt->request_bufflen -= sgpnt[i].length;
-                               SCpnt->use_sg = i;
-                               if (i == 0) {
-                                       goto big_trouble;
-                               }
-                               break;
-                       }
-                       if (rq_data_dir(req) == WRITE)
-                               memcpy(sgpnt[i].address, bbpnt[i],
-                                      sgpnt[i].length);
-               }
        }
-       return 1;
-
-      big_trouble:
-       /*
-        * We come here in the event that we get one humongous
-        * request, where we need a bounce buffer, and the buffer is
-        * more than we can allocate in a single call to
-        * scsi_malloc().  In addition, we only come here when it is
-        * the 0th element of the scatter-gather table that gets us
-        * into this trouble.  As a fallback, we fall back to
-        * non-scatter-gather, and ask for a single segment.  We make
-        * a half-hearted attempt to pick a reasonably large request
-        * size mainly so that we don't thrash the thing with
-        * iddy-biddy requests.
-        */
-
-       /*
-        * The original number of sectors in the 0th element of the
-        * scatter-gather table.  
-        */
-       sectors = sgpnt[0].length >> 9;
 
-       /* 
-        * Free up the original scatter-gather table.  Note that since
-        * it was the 0th element that got us here, we don't have to
-        * go in and free up memory from the other slots.  
-        */
-       SCpnt->request_bufflen = 0;
-       SCpnt->use_sg = 0;
-       scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
-
-       /*
-        * Make an attempt to pick up as much as we reasonably can.
-        * Just keep adding sectors until the pool starts running kind of
-        * low.  The limit of 30 is somewhat arbitrary - the point is that
-        * it would kind of suck if we dropped down and limited ourselves to
-        * single-block requests if we had hundreds of free sectors.
-        */
-       if( scsi_dma_free_sectors > 30 ) {
-               for (this_count = 0, bio = req->bio; bio; bio = bio->bi_next) {
-                       if( scsi_dma_free_sectors - this_count < 30 
-                           || this_count == sectors )
-                       {
-                               break;
-                       }
-                       this_count += bio_sectors(bio);
-               }
-
-       } else {
-               /*
-                * Yow!   Take the absolute minimum here.
-                */
-               this_count = req->current_nr_sectors;
-       }
-
-       /*
-        * Now drop through into the single-segment case.
-        */
-       
-      single_segment:
-       /*
-        * Come here if for any reason we choose to do this as a single
-        * segment.  Possibly the entire request, or possibly a small
-        * chunk of the entire request.
-        */
-
-       bio = req->bio;
-       buff = req->buffer = bio_data(bio);
-
-       if (dma_host || PageHighMem(bio_page(bio))) {
-               /*
-                * Allocate a DMA bounce buffer.  If the allocation fails, fall
-                * back and allocate a really small one - enough to satisfy
-                * the first buffer.
-                */
-               if (bio_to_phys(bio) + bio->bi_size - 1 > ISA_DMA_THRESHOLD) {
-                       buff = (char *) scsi_malloc(this_count << 9);
-                       if (!buff) {
-                               printk("Warning - running low on DMA memory\n");
-                               this_count = req->current_nr_sectors;
-                               buff = (char *) scsi_malloc(this_count << 9);
-                               if (!buff) {
-                                       dma_exhausted(SCpnt, 0);
-                                       return 0;
-                               }
-                       }
-                       if (rq_data_dir(req) == WRITE) {
-                               unsigned long flags;
-                               char *buf = bio_kmap_irq(bio, &flags);
-                               memcpy(buff, buf, this_count << 9);
-                               bio_kunmap_irq(buf, &flags);
-                       }
-               }
-       }
-       SCpnt->request_bufflen = this_count << 9;
-       SCpnt->request_buffer = buff;
-       SCpnt->use_sg = 0;
-       return 1;
+       printk("Incorrect number of segments after building list\n");
+       printk("counted %d, received %d\n", count, SCpnt->use_sg);
+       printk("req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, req->current_nr_sectors);
+       BUG();
+       return 0; /* ahem */
 }
 
-#define INITIO(_FUNCTION, _DMA)                        \
-static int _FUNCTION(Scsi_Cmnd * SCpnt)                \
-{                                              \
-    return __init_io(SCpnt, _DMA);             \
-}
-
-INITIO(scsi_init_io_v, 0)
-INITIO(scsi_init_io_vd, 1)
-
 /*
- * Function:    initialize_merge_fn()
+ * Function:    scsi_initialize_merge_fn()
  *
  * Purpose:     Initialize merge function for a host
  *
@@ -737,35 +123,15 @@ INITIO(scsi_init_io_vd, 1)
  *
  * Notes:
  */
-void initialize_merge_fn(Scsi_Device * SDpnt)
+void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
 {
        struct Scsi_Host *SHpnt = SDpnt->host;
        request_queue_t *q = &SDpnt->request_queue;
        dma64_addr_t bounce_limit;
 
        /*
-        * If this host has an unlimited tablesize, then don't bother with a
-        * merge manager.  The whole point of the operation is to make sure
-        * that requests don't grow too large, and this host isn't picky.
-        *
-        * Note that ll_rw_blk.c is effectively maintaining a segment
-        * count which is only valid if clustering is used, and it obviously
-        * doesn't handle the DMA case.   In the end, it
-        * is simply easier to do it ourselves with our own functions
-        * rather than rely upon the default behavior of ll_rw_blk.
-        */
-       q->back_merge_fn = scsi_back_merge_fn;
-       q->front_merge_fn = scsi_front_merge_fn;
-       q->merge_requests_fn = scsi_merge_requests_fn;
-
-       if (SHpnt->unchecked_isa_dma == 0) {
-               SDpnt->scsi_init_io_fn = scsi_init_io_v;
-       } else {
-               SDpnt->scsi_init_io_fn = scsi_init_io_vd;
-       }
-
-       /*
-        * now enable highmem I/O, if appropriate
+        * The generic merging functions work just fine for us.
+        * Enable highmem I/O, if appropriate.
         */
        bounce_limit = BLK_BOUNCE_HIGH;
        if (SHpnt->highmem_io && (SDpnt->type == TYPE_DISK)) {
@@ -777,6 +143,8 @@ void initialize_merge_fn(Scsi_Device * SDpnt)
                else
                        bounce_limit = SHpnt->pci_dev->dma_mask;
        }
+       if (SHpnt->unchecked_isa_dma)
+               bounce_limit = BLK_BOUNCE_ISA;
 
        blk_queue_bounce_limit(q, bounce_limit);
 }
index ad3e31af55b314359731325b0174b8cf1c3c34b2..9cd871b0ceec0c88be4e68c140066d4d08a7e51b 100644 (file)
@@ -320,7 +320,7 @@ void scan_scsis(struct Scsi_Host *shpnt,
        SDpnt->host = shpnt;
        SDpnt->online = TRUE;
 
-       initialize_merge_fn(SDpnt);
+       scsi_initialize_merge_fn(SDpnt);
 
         /*
          * Initialize the object that we will use to wait for command blocks.
@@ -390,8 +390,6 @@ void scan_scsis(struct Scsi_Host *shpnt,
                                        }
                                }
                        }
-                       scsi_resize_dma_pool();
-
                        for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
                                if (sdtpnt->finish && sdtpnt->nr_dev) {
                                        (*sdtpnt->finish) ();
@@ -759,7 +757,7 @@ static int scan_scsis_single(unsigned int channel, unsigned int dev,
         */
        scsi_initialize_queue(SDpnt, shpnt);
        SDpnt->host = shpnt;
-       initialize_merge_fn(SDpnt);
+       scsi_initialize_merge_fn(SDpnt);
 
        /*
         * Mark this device as online, or otherwise we won't be able to do much with it.
index 7fb246998bffcd81d6df816f01a4199ac3635f8c..dbe14bacb5ff2035da23c00bf3f669dca3324734 100644 (file)
@@ -33,8 +33,6 @@
  */
 EXPORT_SYMBOL(scsi_register_module);
 EXPORT_SYMBOL(scsi_unregister_module);
-EXPORT_SYMBOL(scsi_free);
-EXPORT_SYMBOL(scsi_malloc);
 EXPORT_SYMBOL(scsi_register);
 EXPORT_SYMBOL(scsi_unregister);
 EXPORT_SYMBOL(scsicam_bios_param);
@@ -48,9 +46,7 @@ EXPORT_SYMBOL(print_sense);
 EXPORT_SYMBOL(print_req_sense);
 EXPORT_SYMBOL(print_msg);
 EXPORT_SYMBOL(print_status);
-EXPORT_SYMBOL(scsi_dma_free_sectors);
 EXPORT_SYMBOL(kernel_scsi_ioctl);
-EXPORT_SYMBOL(scsi_need_isa_buffer);
 EXPORT_SYMBOL(scsi_release_command);
 EXPORT_SYMBOL(print_Scsi_Cmnd);
 EXPORT_SYMBOL(scsi_block_when_processing_errors);
index eb93a833a086f0c8f890856c4658a37f5b2e010c..7bada9dd8e49b16df1466564e36e102333945271 100644 (file)
@@ -765,7 +765,7 @@ static int sd_init_onedisk(int i)
                return i;
        }
 
-       buffer = (unsigned char *) scsi_malloc(512);
+       buffer = kmalloc(512, GFP_DMA);
        if (!buffer) {
                printk(KERN_WARNING "(sd_init_onedisk:) Memory allocation failure.\n");
                scsi_release_request(SRpnt);
@@ -1042,7 +1042,7 @@ static int sd_init_onedisk(int i)
        scsi_release_request(SRpnt);
        SRpnt = NULL;
 
-       scsi_free(buffer, 512);
+       kfree(buffer);
        return i;
 }
 
@@ -1111,7 +1111,7 @@ static int sd_init()
                 * commands if they know what they're doing and they ask for it
                 * explicitly via the SHpnt->max_sectors API.
                 */
-               sd_max_sectors[i] = MAX_SEGMENTS*8;
+               sd_max_sectors[i] = MAX_PHYS_SEGMENTS*8;
        }
 
        for (i = 0; i < N_USED_SD_MAJORS; i++) {
index 8c637639dbc64513cd52c00e08f44b0e83848f48..44a5075b8330117dcaf7acf44f051ab14dc45e64 100644 (file)
@@ -2280,9 +2280,8 @@ static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp)
             rqSz = num_sect * SG_SECTOR_SZ;
         }
         while (num_sect > 0) {
-            if ((num_sect <= sg_pool_secs_avail) &&
-                (scsi_dma_free_sectors > (SG_LOW_POOL_THRESHHOLD + num_sect))) {
-                resp = scsi_malloc(rqSz);
+            if ((num_sect <= sg_pool_secs_avail)) {
+                resp = kmalloc(rqSz, page_mask);
                 if (resp) {
                     if (retSzp) *retSzp = rqSz;
                     sg_pool_secs_avail -= num_sect;
@@ -2374,7 +2373,7 @@ static void sg_low_free(char * buff, int size, int mem_src)
        {
            int num_sect = size / SG_SECTOR_SZ;
 
-           scsi_free(buff, size);
+           kfree(buff);
            sg_pool_secs_avail += num_sect;
        }
        break;
@@ -2681,9 +2680,8 @@ static int sg_proc_debug_info(char * buffer, int * len, off_t * begin,
     max_dev = sg_last_dev();
     PRINT_PROC("dev_max(currently)=%d max_active_device=%d (origin 1)\n",
               sg_template.dev_max, max_dev);
-    PRINT_PROC(" scsi_dma_free_sectors=%u sg_pool_secs_aval=%d "
-              "def_reserved_size=%d\n",
-              scsi_dma_free_sectors, sg_pool_secs_avail, sg_big_buff);
+    PRINT_PROC(" sg_pool_secs_aval=%d def_reserved_size=%d\n",
+              sg_pool_secs_avail, sg_big_buff);
     for (j = 0; j < max_dev; ++j) {
        if ((sdp = sg_get_dev(j))) {
            Sg_fd * fp;
index 1d1c2714149a64982cb97ac299ea50f82583de7b..530f89385428469d939a2d4b805d4eb03bbc1bf6 100644 (file)
@@ -258,112 +258,6 @@ static request_queue_t *sr_find_queue(kdev_t dev)
        return &scsi_CDs[MINOR(dev)].device->request_queue;
 }
 
-static int sr_scatter_pad(Scsi_Cmnd *SCpnt, int s_size)
-{
-       struct scatterlist *sg, *old_sg = NULL;
-       int i, fsize, bsize, sg_ent, sg_count;
-       char *front, *back;
-       void **bbpnt, **old_bbpnt = NULL;
-
-       back = front = NULL;
-       sg_ent = SCpnt->use_sg;
-       bsize = 0; /* gcc... */
-
-       /*
-        * need front pad
-        */
-       if ((fsize = SCpnt->request.sector % (s_size >> 9))) {
-               fsize <<= 9;
-               sg_ent++;
-               if ((front = scsi_malloc(fsize)) == NULL)
-                       goto no_mem;
-       }
-       /*
-        * need a back pad too
-        */
-       if ((bsize = s_size - ((SCpnt->request_bufflen + fsize) % s_size))) {
-               sg_ent++;
-               if ((back = scsi_malloc(bsize)) == NULL)
-                       goto no_mem;
-       }
-
-       /*
-        * extend or allocate new scatter-gather table
-        */
-       sg_count = SCpnt->use_sg;
-       if (sg_count) {
-               old_sg = (struct scatterlist *) SCpnt->request_buffer;
-               old_bbpnt = SCpnt->bounce_buffers;
-       } else {
-               sg_count = 1;
-               sg_ent++;
-       }
-
-       /* Get space for scatterlist and bounce buffer array. */
-       i  = sg_ent * sizeof(struct scatterlist);
-       i += sg_ent * sizeof(void *);
-       i  = (i + 511) & ~511;
-
-       if ((sg = scsi_malloc(i)) == NULL)
-               goto no_mem;
-
-       bbpnt = (void **)
-               ((char *)sg + (sg_ent * sizeof(struct scatterlist)));
-
-       /*
-        * no more failing memory allocs possible, we can safely assign
-        * SCpnt values now
-        */
-       SCpnt->sglist_len = i;
-       SCpnt->use_sg = sg_count;
-       memset(sg, 0, SCpnt->sglist_len);
-
-       i = 0;
-       if (fsize) {
-               sg[0].address = bbpnt[0] = front;
-               sg[0].length = fsize;
-               i++;
-       }
-       if (old_sg) {
-               memcpy(sg + i, old_sg, SCpnt->use_sg * sizeof(struct scatterlist));
-               if (old_bbpnt)
-                       memcpy(bbpnt + i, old_bbpnt, SCpnt->use_sg * sizeof(void *));
-               scsi_free(old_sg, (((SCpnt->use_sg * sizeof(struct scatterlist)) +
-                                   (SCpnt->use_sg * sizeof(void *))) + 511) & ~511);
-       } else {
-               sg[i].address = NULL;
-               sg[i].page = virt_to_page(SCpnt->request_buffer);
-               sg[i].offset = (unsigned long) SCpnt->request_buffer&~PAGE_MASK;
-               sg[i].length = SCpnt->request_bufflen;
-       }
-
-       SCpnt->request_bufflen += (fsize + bsize);
-       SCpnt->request_buffer = sg;
-       SCpnt->bounce_buffers = bbpnt;
-       SCpnt->use_sg += i;
-
-       if (bsize) {
-               sg[SCpnt->use_sg].address = NULL;
-               sg[SCpnt->use_sg].page = virt_to_page(back);
-               sg[SCpnt->use_sg].offset = (unsigned long) back & ~PAGE_MASK;
-               bbpnt[SCpnt->use_sg] = back;
-               sg[SCpnt->use_sg].length = bsize;
-               SCpnt->use_sg++;
-       }
-
-       return 0;
-
-no_mem:
-       printk("sr: ran out of mem for scatter pad\n");
-       if (front)
-               scsi_free(front, fsize);
-       if (back)
-               scsi_free(back, bsize);
-
-       return 1;
-}
-
-
 static int sr_init_command(Scsi_Cmnd * SCpnt)
 {
        int dev, devm, block=0, this_count, s_size;
@@ -429,9 +323,10 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
        /*
         * request doesn't start on hw block boundary, add scatter pads
         */
-       if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size))
-               if (sr_scatter_pad(SCpnt, s_size))
-                       return 0;
+       if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size)) {
+               printk("sr: unaligned transfer\n");
+               return 0;
+       }
 
        this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9);
 
@@ -583,7 +478,7 @@ void get_sectorsize(int i)
        int sector_size;
        Scsi_Request *SRpnt;
 
-       buffer = (unsigned char *) scsi_malloc(512);
+       buffer = (unsigned char *) kmalloc(512, GFP_DMA);
        SRpnt = scsi_allocate_request(scsi_CDs[i].device);
        
        if(buffer == NULL || SRpnt == NULL)
@@ -592,7 +487,7 @@ void get_sectorsize(int i)
                sector_size = 2048;     /* A guess, just in case */
                scsi_CDs[i].needs_sector_size = 1;
                if(buffer)
-                       scsi_free(buffer, 512);
+                       kfree(buffer);
                if(SRpnt)
                        scsi_release_request(SRpnt);
                return;
@@ -673,7 +568,7 @@ void get_sectorsize(int i)
                sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
        };
        blk_queue_hardsect_size(blk_get_queue(MAJOR_NR), sector_size);
-       scsi_free(buffer, 512);
+       kfree(buffer);
 }
 
 void get_capabilities(int i)
@@ -694,7 +589,7 @@ void get_capabilities(int i)
                ""
        };
 
-       buffer = (unsigned char *) scsi_malloc(512);
+       buffer = (unsigned char *) kmalloc(512, GFP_DMA);
        if (!buffer)
        {
                printk(KERN_ERR "sr: out of memory.\n");
@@ -714,7 +609,7 @@ void get_capabilities(int i)
                scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
                                         CDC_DVD | CDC_DVD_RAM |
                                         CDC_SELECT_DISC | CDC_SELECT_SPEED);
-               scsi_free(buffer, 512);
+               kfree(buffer);
                printk("sr%i: scsi-1 drive\n", i);
                return;
        }
@@ -767,7 +662,7 @@ void get_capabilities(int i)
        /*else    I don't think it can close its tray
           scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
 
-       scsi_free(buffer, 512);
+       kfree(buffer);
 }
 
 /*
index 3c3a53ad4e7116a8d9a010100f56ed69064bf013..da3ec60fe77d24f49e9ebca12354776713826878 100644 (file)
@@ -95,7 +95,7 @@ int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflen
        SRpnt->sr_request.buffer = buffer;
        if (buffer && SRpnt->sr_host->unchecked_isa_dma &&
            (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
-               bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511);
+               bounce_buffer = (char *) kmalloc(buflength, GFP_DMA);
                if (bounce_buffer == NULL) {
                        printk("SCSI DMA pool exhausted.");
                        return -ENOMEM;
@@ -114,7 +114,7 @@ int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflen
        req = &SRpnt->sr_request;
        if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
                memcpy(req->buffer, SRpnt->sr_buffer, SRpnt->sr_bufflen);
-               scsi_free(SRpnt->sr_buffer, (SRpnt->sr_bufflen + 511) & ~511);
+               kfree(SRpnt->sr_buffer);
                SRpnt->sr_buffer = req->buffer;
         }
 
@@ -519,7 +519,7 @@ int sr_is_xa(int minor)
        if (!xa_test)
                return 0;
 
-       raw_sector = (unsigned char *) scsi_malloc(2048 + 512);
+       raw_sector = (unsigned char *) kmalloc(2048, GFP_DMA | GFP_KERNEL);
        if (!raw_sector)
                return -ENOMEM;
        if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16,
@@ -529,7 +529,7 @@ int sr_is_xa(int minor)
                /* read a raw sector failed for some reason. */
                is_xa = -1;
        }
-       scsi_free(raw_sector, 2048 + 512);
+       kfree(raw_sector);
 #ifdef DEBUG
        printk("sr%d: sr_is_xa: %d\n", minor, is_xa);
 #endif
index 39bd3b6cb226435c40067fc11724767d05d429cf..a1d4a7db41a67a87d8ee0d463645258a928164dd 100644 (file)
@@ -115,7 +115,7 @@ int sr_set_blocklength(int minor, int blocklength)
                density = (blocklength > 2048) ? 0x81 : 0x83;
 #endif
 
-       buffer = (unsigned char *) scsi_malloc(512);
+       buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA);
        if (!buffer)
                return -ENOMEM;
 
@@ -142,7 +142,7 @@ int sr_set_blocklength(int minor, int blocklength)
                printk("sr%d: switching blocklength to %d bytes failed\n",
                       minor, blocklength);
 #endif
-       scsi_free(buffer, 512);
+       kfree(buffer);
        return rc;
 }
 
@@ -162,7 +162,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
        if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION)
                return 0;
 
-       buffer = (unsigned char *) scsi_malloc(512);
+       buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA);
        if (!buffer)
                return -ENOMEM;
 
@@ -306,6 +306,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
                printk(KERN_DEBUG "sr%d: multisession offset=%lu\n",
                       minor, sector);
 #endif
-       scsi_free(buffer, 512);
+       kfree(buffer);
        return rc;
 }
index 70d8a00df5fe8f289190b414eb844aed81950948..bc030dcb4fcc7cbedcd46a4b92f580ac906743be 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
@@ -12126,13 +12126,16 @@ static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
 
        if (!use_sg)
                segn = ncr_scatter_no_sglist(np, cp, cmd);
-       else if (use_sg > MAX_SCATTER)
-               segn = -1;
        else {
                struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
                struct scr_tblmove *data;
 
                use_sg = map_scsi_sg_data(np, cmd);
+               if (use_sg > MAX_SCATTER) {
+                       unmap_scsi_data(np, cmd);
+                       return -1;
+               }
+
                data = &cp->phys.data[MAX_SCATTER - use_sg];
 
                for (segn = 0; segn < use_sg; segn++) {
@@ -12165,13 +12168,15 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
 
        if (!use_sg)
                segment = ncr_scatter_no_sglist(np, cp, cmd);
-       else if (use_sg > MAX_SCATTER)
-               segment = -1;
        else {
                struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
                struct scr_tblmove *data;
 
                use_sg = map_scsi_sg_data(np, cmd);
+               if (use_sg > MAX_SCATTER) {
+                       unmap_scsi_data(np, cmd);
+                       return -1;
+               }
                data = &cp->phys.data[MAX_SCATTER - use_sg];
 
                for (segment = 0; segment < use_sg; segment++) {
index 780a8df9b70ca40041187500552a2b180f7006fa..256d34b6461b90e22dac71fd8e7eee6ae962aa51 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
@@ -96,7 +96,7 @@ int sym53c8xx_release(struct Scsi_Host *);
                        this_id:        7,                      \
                        sg_tablesize:   SCSI_NCR_SG_TABLESIZE,  \
                        cmd_per_lun:    SCSI_NCR_CMD_PER_LUN,   \
-                       max_sectors:    MAX_SEGMENTS*8,         \
+                       max_sectors:    MAX_HW_SEGMENTS*8,      \
                        use_clustering: DISABLE_CLUSTERING,     \
                        highmem_io:     1} 
 
index c020492c0c8dce914abc5fe2ccd83498c78548c5..6a4a3f8550485334a87ab77bd2c0e62a88daaa6b 100644 (file)
@@ -128,3 +128,21 @@ Sun Oct 28 15:00 2001 Gerard Roudier
        * version sym-2.1.16-20011028
        - Slightly simplify driver configuration.
        - Prepare a new patch against linux-2.4.13.
+
+Sat Nov 17 10:00 2001 Gerard Roudier 
+       * version sym-2.1.17
+       - Fix a couple of gcc/gcc3 warnings.
+       - Allocate separately from the HCB the array for CCBs hashed by DSA.
+         All driver memory allocations are now not greater than 1 PAGE 
+         even on PPC64 / 4KB PAGE surprising setup.
+
+Sat Dec 01 18:00 2001 Gerard Roudier 
+       * version sym-2.1.17a
+       - Use u_long instead of U32 for the IO base cookie. This is more 
+         consistent with what archs are expecting.
+       - Use MMIO per default for Power PC instead of some fake normal IO,
+         as Paul Mackerras stated that MMIO works fine now on this arch.
+
+
+
+
index 9c7ef0284753b14148eabecd06ff10567b03aaaa..0f6114bda6369838a676ab352e1bfa914f6ba419 100644 (file)
@@ -130,17 +130,17 @@ int sym53c8xx_release(struct Scsi_Host *);
 #if !defined(HOSTS_C)
 
 /*
- *  Use normal IO if configured. Forced for alpha and powerpc.
- *  Powerpc fails copying to on-chip RAM using memcpy_toio().
+ *  Use normal IO if configured.
+ *  Normal IO forced for alpha.
  *  Forced to MMIO for sparc.
  */
 #if defined(__alpha__)
 #define        SYM_CONF_IOMAPPED
-#elif defined(__powerpc__)
-#define        SYM_CONF_IOMAPPED
-#define SYM_OPT_NO_BUS_MEMORY_MAPPING
 #elif defined(__sparc__)
 #undef SYM_CONF_IOMAPPED
+/* #elif defined(__powerpc__) */
+/* #define     SYM_CONF_IOMAPPED */
+/* #define SYM_OPT_NO_BUS_MEMORY_MAPPING */
 #elif defined(CONFIG_SCSI_SYM53C8XX_IOMAPPED)
 #define        SYM_CONF_IOMAPPED
 #endif
index 9c9c3bc4d122d4522eeac794f53b45698d702458..d8c43a9d5abb74c2a56d206506354d2850724ac5 100644 (file)
@@ -647,12 +647,15 @@ static int sym_scatter(hcb_p np, ccb_p cp, Scsi_Cmnd *cmd)
 
        if (!use_sg)
                segment = sym_scatter_no_sglist(np, cp, cmd);
-       else if (use_sg > SYM_CONF_MAX_SG)
-               segment = -1;
        else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
                struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
                struct sym_tblmove *data;
 
+               if (use_sg > SYM_CONF_MAX_SG) {
+                       unmap_scsi_data(np, cmd);
+                       return -1;
+               }
+
                data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
 
                for (segment = 0; segment < use_sg; segment++) {
@@ -2452,8 +2455,8 @@ sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, sym_device *device)
        u_char pci_fix_up = SYM_SETUP_PCI_FIX_UP;
        u_char revision;
        u_int irq;
-       u_long base, base_2, io_port;
-       u_long base_c, base_2_c; 
+       u_long base, base_2, base_io;
+       u_long base_c, base_2_c, io_port;
        int i;
        sym_chip *chip;
 
@@ -2470,7 +2473,7 @@ sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, sym_device *device)
        device_id = PciDeviceId(pdev);
        irq       = PciIrqLine(pdev);
 
-       i = pci_get_base_address(pdev, 0, &io_port);
+       i = pci_get_base_address(pdev, 0, &base_io);
        io_port = pci_get_base_cookie(pdev, 0);
 
        base_c = pci_get_base_cookie(pdev, i);
@@ -2488,9 +2491,9 @@ sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, sym_device *device)
        /*
         *  If user excluded this chip, donnot initialize it.
         */
-       if (io_port) {
+       if (base_io) {
                for (i = 0 ; i < 8 ; i++) {
-                       if (sym_driver_setup.excludes[i] == io_port)
+                       if (sym_driver_setup.excludes[i] == base_io)
                                return -1;
                }
        }
index c89832f20463e052f84c4c56f0b459a18f90b868..4db72ce33f4ed4804ae11f00ff94567eb424b506 100644 (file)
@@ -77,7 +77,6 @@
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/string.h>
-#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/ioport.h>
 #include <linux/time.h>
@@ -463,7 +462,7 @@ struct sym_shcb {
 
        vm_offset_t     mmio_va;        /* MMIO kernel virtual address  */
        vm_offset_t     ram_va;         /* RAM  kernel virtual address  */
-       u32             io_port;        /* IO port address              */
+       u_long          io_port;        /* IO port address cookie       */
        u_short         io_ws;          /* IO window size               */
        int             irq;            /* IRQ number                   */
 
index ce06b7222e34ffc5b335bb97422caf98aa7ad008..3ed0e9e531574498fde68bcb5208e62cd4f3b18f 100644 (file)
@@ -4689,8 +4689,9 @@ out_reject:
        return;
 out_clrack:
        OUTL_DSP (SCRIPTA_BA (np, clrack));
+       return;
 out_stuck:
-       ;
+       return;
 }
 
 /*
@@ -5223,8 +5224,10 @@ static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
         *  And accept tagged commands now.
         */
        lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
+
+       return;
 fail:
-       ;
+       return;
 }
 
 /*
@@ -5786,6 +5789,13 @@ int sym_hcb_attach(hcb_p np, struct sym_fw *fw)
        if (!np->scripta0 || !np->scriptb0 || !np->scriptz0)
                goto attach_failed;
 
+       /*
+        *  Allocate the array of lists of CCBs hashed by DSA.
+        */
+       np->ccbh = sym_calloc(sizeof(ccb_p *)*CCB_HASH_SIZE, "CCBH");
+       if (!np->ccbh)
+               goto attach_failed;
+
        /*
         *  Initialyze the CCB free and busy queues.
         */
@@ -5977,6 +5987,8 @@ void sym_hcb_free(hcb_p np)
                        sym_mfree_dma(cp, sizeof(*cp), "CCB");
                }
        }
+       if (np->ccbh)
+               sym_mfree(np->ccbh, sizeof(ccb_p *)*CCB_HASH_SIZE, "CCBH");
 
        if (np->badluntbl)
                sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
index 62530d4c451b509be9cc4272cb8d3d037364f7a3..cd8d7919e741d8d1764dc44cf7c1ba878453a4aa 100644 (file)
@@ -1068,7 +1068,8 @@ struct sym_hcb {
        /*
         *  CCB lists and queue.
         */
-       ccb_p ccbh[CCB_HASH_SIZE];      /* CCB hashed by DSA value      */
+       ccb_p *ccbh;                    /* CCBs hashed by DSA value     */
+                                       /* CCB_HASH_SIZE lists of CCBs  */
        SYM_QUEHEAD     free_ccbq;      /* Queue of available CCBs      */
        SYM_QUEHEAD     busy_ccbq;      /* Queue of busy CCBs           */
 
index 57934acc0a35dd03207d5de4ee9d90290c7079c6..ec4f1cc41c4cf6d292b1b6f7431c84f84d30742e 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
index 52bd0eaab97fa2fa36372feeefc1195df5560e30..82f3f11195e34fd4b62d91cd6bfd97e4686d972c 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
index 30c400e5bcc6e47b30660d516172f865ac91ab71..720b707a34e01017bf3f4fccaa52123e667dbfe1 100644 (file)
@@ -66,7 +66,7 @@ struct adfs_discmap {
 
 /* Inode stuff */
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
-int adfs_get_block(struct inode *inode, long block,
+int adfs_get_block(struct inode *inode, sector_t block,
                   struct buffer_head *bh, int create);
 #else
 int adfs_bmap(struct inode *inode, int block);
index ef7715c4458da81b1542c247be3181bf7f97dbbf..66a0c36a74fbc3a0e4a1548b644a92c8ee90dc5f 100644 (file)
@@ -193,7 +193,7 @@ adfs_dir_read(struct super_block *sb, unsigned long object_id,
                        goto release_buffers;
                }
 
-               dir->bh[blk] = bread(sb->s_dev, phys, sb->s_blocksize);
+               dir->bh[blk] = sb_bread(sb, phys);
                if (!dir->bh[blk])
                        goto release_buffers;
        }
index 329bbd5f99e92d199ea9a229d47d449d19d9c66f..71064bc55150cbec89bf857c15ac3b1098729187 100644 (file)
@@ -35,7 +35,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
                goto out;
        }
 
-       dir->bh[0] = bread(sb->s_dev, block, sb->s_blocksize);
+       dir->bh[0] = sb_bread(sb, block);
        if (!dir->bh[0])
                goto out;
        dir->nr_buffers += 1;
@@ -60,7 +60,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
                        goto out;
                }
 
-               dir->bh[blk] = bread(sb->s_dev, block, sb->s_blocksize);
+               dir->bh[blk] = sb_bread(sb, block);
                if (!dir->bh[blk])
                        goto out;
                dir->nr_buffers = blk;
index 22d9bfd25049796b0e0639194d4e3b1bbb8b4075..9e402bcd9a63093d81d304e079a5d19a3b3c0de9 100644 (file)
@@ -27,7 +27,7 @@
  * not support creation of new blocks, so we return -EIO for this case.
  */
 int
-adfs_get_block(struct inode *inode, long block, struct buffer_head *bh, int create)
+adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, int create)
 {
        if (block < 0)
                goto abort_negative;
index 00be08b380826542aed0fba47388114d5b212649..f1af56308a1ed768775508e2e76fcc0b794e0a44 100644 (file)
@@ -263,7 +263,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
                dm[zone].dm_startbit = 0;
                dm[zone].dm_endbit   = zone_size;
                dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
-               dm[zone].dm_bh       = bread(sb->s_dev, map_addr, sb->s_blocksize);
+               dm[zone].dm_bh       = sb_bread(sb, map_addr);
 
                if (!dm[zone].dm_bh) {
                        adfs_error(sb, "unable to read map");
@@ -319,8 +319,9 @@ struct super_block *adfs_read_super(struct super_block *sb, void *data, int sile
        if (parse_options(sb, data))
                goto error;
 
+       sb->s_blocksize = BLOCK_SIZE;
        set_blocksize(dev, BLOCK_SIZE);
-       if (!(bh = bread(dev, ADFS_DISCRECORD / BLOCK_SIZE, BLOCK_SIZE))) {
+       if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
                adfs_error(sb, "unable to read superblock");
                goto error;
        }
@@ -354,7 +355,7 @@ struct super_block *adfs_read_super(struct super_block *sb, void *data, int sile
 
                brelse(bh);
                set_blocksize(dev, sb->s_blocksize);
-               bh = bread(dev, ADFS_DISCRECORD / sb->s_blocksize, sb->s_blocksize);
+               bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
                if (!bh) {
                        adfs_error(sb, "couldn't read superblock on "
                                "2nd try.");
index 8a168f71fe00f8cf790376e9d767e0ceb892e46d..a54289141ad2d83a8213685601e413cc88cbcf25 100644 (file)
@@ -38,8 +38,6 @@ static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
 static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
 static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
 static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
-static int affs_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create);
-
 static ssize_t affs_file_write(struct file *filp, const char *buf, size_t count, loff_t *ppos);
 static int affs_file_open(struct inode *inode, struct file *filp);
 static int affs_file_release(struct inode *inode, struct file *filp);
@@ -332,7 +330,7 @@ err_bread:
 }
 
 static int
-affs_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
+affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
 {
        struct super_block      *sb = inode->i_sb;
        struct buffer_head      *ext_bh;
index a2a034fc2fe2051c2330a341b94004e27e8d8b5d..d65e44bb39a972e02323a0a89e005566acc3caef 100644 (file)
@@ -332,7 +332,7 @@ got_root:
                               blocksize == 2048 ? 11 : 12;
 
        /* Find out which kind of FS we have */
-       boot_bh = bread(sb->s_dev, 0, sb->s_blocksize);
+       boot_bh = sb_bread(sb, 0);
        if (!boot_bh) {
                printk(KERN_ERR "AFFS: Cannot read boot block\n");
                goto out_error;
index 5caf04a7b8d163dfc9e50f1f05a20eb584208514..5a887489070b8695f748ddf7e0191caac428f9b5 100644 (file)
@@ -41,7 +41,7 @@ static int bfs_readdir(struct file * f, void * dirent, filldir_t filldir)
        while (f->f_pos < dir->i_size) {
                offset = f->f_pos & (BFS_BSIZE-1);
                block = dir->iu_sblock + (f->f_pos >> BFS_BSIZE_BITS);
-               bh = bread(dev, block, BFS_BSIZE);
+               bh = sb_bread(dir->i_sb, block);
                if (!bh) {
                        f->f_pos += BFS_BSIZE - offset;
                        continue;
@@ -270,7 +270,7 @@ static int bfs_add_entry(struct inode * dir, const char * name, int namelen, int
        sblock = dir->iu_sblock;
        eblock = dir->iu_eblock;
        for (block=sblock; block<=eblock; block++) {
-               bh = bread(dev, block, BFS_BSIZE);
+               bh = sb_bread(dir->i_sb, block);
                if(!bh) 
                        return -ENOSPC;
                for (off=0; off<BFS_BSIZE; off+=BFS_DIRENT_SIZE) {
@@ -319,7 +319,7 @@ static struct buffer_head * bfs_find_entry(struct inode * dir,
        block = offset = 0;
        while (block * BFS_BSIZE + offset < dir->i_size) {
                if (!bh) {
-                       bh = bread(dir->i_dev, dir->iu_sblock + block, BFS_BSIZE);
+                       bh = sb_bread(dir->i_sb, dir->iu_sblock + block);
                        if (!bh) {
                                block++;
                                continue;
index d7a284392225bed3115bc1daab2a87c474d7be0f..bb301b44e42126433de192b78d8ed66e20fa7a2f 100644 (file)
@@ -54,7 +54,7 @@ static int bfs_move_blocks(kdev_t dev, unsigned long start, unsigned long end,
        return 0;
 }
 
-static int bfs_get_block(struct inode * inode, long block, 
+static int bfs_get_block(struct inode * inode, sector_t block, 
        struct buffer_head * bh_result, int create)
 {
        long phys;
index 6a4a4c5487cf3523d62ad1530a424e0aa5d1375f..f83f13f562007a0cc809b97146e9e41ff526e700 100644 (file)
@@ -47,7 +47,7 @@ static void bfs_read_inode(struct inode * inode)
        }
 
        block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
-       bh = bread(dev, block, BFS_BSIZE);
+       bh = sb_bread(inode->i_sb, block);
        if (!bh) {
                printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
                make_bad_inode(inode);
@@ -100,7 +100,7 @@ static void bfs_write_inode(struct inode * inode, int unused)
 
        lock_kernel();
        block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
-       bh = bread(dev, block, BFS_BSIZE);
+       bh = sb_bread(inode->i_sb, block);
        if (!bh) {
                printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
                unlock_kernel();
@@ -153,7 +153,7 @@ static void bfs_delete_inode(struct inode * inode)
        lock_kernel();
        mark_inode_dirty(inode);
        block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
-       bh = bread(dev, block, BFS_BSIZE);
+       bh = sb_bread(s, block);
        if (!bh) {
                printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
                unlock_kernel();
@@ -252,7 +252,7 @@ static struct super_block * bfs_read_super(struct super_block * s,
        s->s_blocksize = BFS_BSIZE;
        s->s_blocksize_bits = BFS_BSIZE_BITS;
 
-       bh = bread(dev, 0, BFS_BSIZE);
+       bh = sb_bread(s, 0);
        if(!bh)
                goto out;
        bfs_sb = (struct bfs_super_block *)bh->b_data;
index 36fe91f4a636a092d4e1fe2b5287e65925d1a6bc..555b7ac14421447b94f20871644905a1d3ddaf26 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -111,7 +111,8 @@ inline void bio_init(struct bio *bio)
        bio->bi_rw = 0;
        bio->bi_vcnt = 0;
        bio->bi_idx = 0;
-       bio->bi_hw_seg = 0;
+       bio->bi_phys_segments = 0;
+       bio->bi_hw_segments = 0;
        bio->bi_size = 0;
        bio->bi_end_io = NULL;
        atomic_set(&bio->bi_cnt, 1);
@@ -166,12 +167,20 @@ void bio_put(struct bio *bio)
        }
 }
 
+inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+{
+       if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
+               blk_recount_segments(q, bio);
+
+       return bio->bi_phys_segments;
+}
+
 inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
 {
-       if (unlikely(!(bio->bi_flags & BIO_SEG_VALID)))
+       if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
                blk_recount_segments(q, bio);
 
-       return bio->bi_hw_seg;
+       return bio->bi_hw_segments;
 }
 
 /**
@@ -199,7 +208,8 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
        bio->bi_vcnt = bio_src->bi_vcnt;
        bio->bi_idx = bio_src->bi_idx;
        if (bio_src->bi_flags & (1 << BIO_SEG_VALID)) {
-               bio->bi_hw_seg = bio_src->bi_hw_seg;
+               bio->bi_phys_segments = bio_src->bi_phys_segments;
+               bio->bi_hw_segments = bio_src->bi_hw_segments;
                bio->bi_flags |= (1 << BIO_SEG_VALID);
        }
        bio->bi_size = bio_src->bi_size;
@@ -496,7 +506,7 @@ static int __init init_bio(void)
        if (!bio_pool)
                panic("bio: can't create mempool\n");
 
-       printk("BIO: pool of %d setup, %uKb (%d bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
+       printk("BIO: pool of %d setup, %ZuKb (%Zd bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
 
        biovec_init_pool();
 
@@ -513,4 +523,5 @@ EXPORT_SYMBOL(bio_init);
 EXPORT_SYMBOL(bio_copy);
 EXPORT_SYMBOL(__bio_clone);
 EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_phys_segments);
 EXPORT_SYMBOL(bio_hw_segments);
index 7582c9dd80a8c8a390f4122906b37bd325487464..7f4afb369346c1f24c259711346bc6ad1864225f 100644 (file)
@@ -153,7 +153,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 
                bh = NULL;
                if (blocknr + i < devsize) {
-                       bh = getblk(sb->s_dev, blocknr + i, PAGE_CACHE_SIZE);
+                       bh = sb_getblk(sb, blocknr + i);
                        if (!buffer_uptodate(bh))
                                read_array[unread++] = bh;
                }
index 9bba7a2cbfa25bb649f08791944d3dff308ed6c0..cc7df77a034adf533ed483849e06126f0fd9d288 100644 (file)
@@ -40,7 +40,7 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
        /* look at all blocks */
        while (block < inode->i_blocks) {
                /* read the dir block */
-               bh = bread(inode->i_dev, efs_bmap(inode, block), EFS_DIRBSIZE);
+               bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
 
                if (!bh) {
                        printk(KERN_ERR "EFS: readdir(): failed to read dir block %d\n", block);
index 67f58987ead05b2c83c92a45658fd3bc27532859..faa5b9f2c648742c819574e491ed100e91becb3b 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <linux/efs_fs.h>
 
-int efs_get_block(struct inode *inode, long iblock,
+int efs_get_block(struct inode *inode, sector_t iblock,
                  struct buffer_head *bh_result, int create)
 {
        int error = -EROFS;
index 39e503d3fbc8d9b5d94ab991904a008fb84a38e2..67d050f2278395986ae032efac489ea7b010cb74 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/module.h>
 
 
-extern int efs_get_block(struct inode *, long, struct buffer_head *, int);
+extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 static int efs_readpage(struct file *file, struct page *page)
 {
        return block_read_full_page(page,efs_get_block);
@@ -77,7 +77,7 @@ void efs_read_inode(struct inode *inode) {
                        (EFS_BLOCKSIZE / sizeof(struct efs_dinode))) *
                sizeof(struct efs_dinode);
 
-       bh = bread(inode->i_dev, block, EFS_BLOCKSIZE);
+       bh = sb_bread(inode->i_sb, block);
        if (!bh) {
                printk(KERN_WARNING "EFS: bread() failed at block %d\n", block);
                goto read_inode_error;
@@ -271,7 +271,7 @@ efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
                if (first || lastblock != iblock) {
                        if (bh) brelse(bh);
 
-                       bh = bread(inode->i_dev, iblock, EFS_BLOCKSIZE);
+                       bh = sb_bread(inode->i_sb, iblock);
                        if (!bh) {
                                printk(KERN_ERR "EFS: bread() failed at block %d\n", iblock);
                                return 0;
index cc85f5d9a8691033606c72e1b26269b121c28c3a..cc06bc8cbab0259ae248a9b88237eea8b689e06a 100644 (file)
@@ -24,7 +24,7 @@ static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len)
 
        for(block = 0; block < inode->i_blocks; block++) {
 
-               bh = bread(inode->i_dev, efs_bmap(inode, block), EFS_DIRBSIZE);
+               bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
                if (!bh) {
                        printk(KERN_ERR "EFS: find_entry(): failed to read dir block %d\n", block);
                        return 0;
index 79ee8d837a3032d7ae46ad7b1f35dc0ac1d263a2..691f6df84fc3923c1ba4c09506e2638856f2e6d9 100644 (file)
@@ -137,11 +137,14 @@ struct super_block *efs_read_super(struct super_block *s, void *d, int silent) {
        struct buffer_head *bh;
 
        sb = SUPER_INFO(s);
-
+       s->s_magic              = EFS_SUPER_MAGIC;
+       s->s_blocksize          = EFS_BLOCKSIZE;
+       s->s_blocksize_bits     = EFS_BLOCKSIZE_BITS;
        set_blocksize(dev, EFS_BLOCKSIZE);
   
        /* read the vh (volume header) block */
-       bh = bread(dev, 0, EFS_BLOCKSIZE);
+       bh = sb_bread(s, 0);
 
        if (!bh) {
                printk(KERN_ERR "EFS: cannot read volume header\n");
@@ -160,7 +163,7 @@ struct super_block *efs_read_super(struct super_block *s, void *d, int silent) {
                goto out_no_fs_ul;
        }
 
-       bh = bread(dev, sb->fs_start + EFS_SUPER, EFS_BLOCKSIZE);
+       bh = sb_bread(s, sb->fs_start + EFS_SUPER);
        if (!bh) {
                printk(KERN_ERR "EFS: cannot read superblock\n");
                goto out_no_fs_ul;
@@ -174,10 +177,6 @@ struct super_block *efs_read_super(struct super_block *s, void *d, int silent) {
                goto out_no_fs_ul;
        }
        brelse(bh);
-       s->s_magic              = EFS_SUPER_MAGIC;
-       s->s_blocksize          = EFS_BLOCKSIZE;
-       s->s_blocksize_bits     = EFS_BLOCKSIZE_BITS;
 
        if (!(s->s_flags & MS_RDONLY)) {
 #ifdef DEBUG
index b5d17f3cc9544130f3f9f617a32871440747ecff..5dd10f50edb04ef9c26b14e70163926ccc860190 100644 (file)
@@ -26,13 +26,13 @@ static int efs_symlink_readpage(struct file *file, struct page *page)
        lock_kernel();
        /* read first 512 bytes of link target */
        err = -EIO;
-       bh = bread(inode->i_dev, efs_bmap(inode, 0), EFS_BLOCKSIZE);
+       bh = sb_bread(inode->i_sb, efs_bmap(inode, 0));
        if (!bh)
                goto fail;
        memcpy(link, bh->b_data, (size > EFS_BLOCKSIZE) ? EFS_BLOCKSIZE : size);
        brelse(bh);
        if (size > EFS_BLOCKSIZE) {
-               bh = bread(inode->i_dev, efs_bmap(inode, 1), EFS_BLOCKSIZE);
+               bh = sb_bread(inode->i_sb, efs_bmap(inode, 1));
                if (!bh)
                        goto fail;
                memcpy(link + EFS_BLOCKSIZE, bh->b_data, size - EFS_BLOCKSIZE);
index 3d799f7a0daa31ab405e16f666da526365605ff8..da07d94626a7017418e4b0576d51d83a2d187752 100644 (file)
@@ -88,7 +88,7 @@ static int read_block_bitmap (struct super_block * sb,
        if (!gdp)
                goto error_out;
        retval = 0;
-       bh = bread (sb->s_dev, le32_to_cpu(gdp->bg_block_bitmap), sb->s_blocksize);
+       bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
        if (!bh) {
                ext2_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
index bcc088a374c70d9a92cb3b8519162538736d8b56..46e7f2220f573fae4c1e50db446b376a82f40df5 100644 (file)
@@ -51,8 +51,7 @@ static struct buffer_head *read_inode_bitmap (struct super_block * sb,
        if (!desc)
                goto error_out;
 
-       bh = bread(sb->s_dev, le32_to_cpu(desc->bg_inode_bitmap),
-                       sb->s_blocksize);
+       bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
        if (!bh)
                ext2_error (sb, "read_inode_bitmap",
                            "Cannot read inode bitmap - "
index 3665f5ef6a7296ff5779f3b9aabcddcfbc8badfc..e96e1a014f5a049d2b62c5a9853c8106afce0d18 100644 (file)
@@ -239,8 +239,7 @@ static Indirect *ext2_get_branch(struct inode *inode,
                                 Indirect chain[4],
                                 int *err)
 {
-       kdev_t dev = inode->i_dev;
-       int size = inode->i_sb->s_blocksize;
+       struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;
 
@@ -250,7 +249,7 @@ static Indirect *ext2_get_branch(struct inode *inode,
        if (!p->key)
                goto no_block;
        while (--depth) {
-               bh = bread(dev, le32_to_cpu(p->key), size);
+               bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
@@ -399,7 +398,7 @@ static int ext2_alloc_branch(struct inode *inode,
                 * Get buffer_head for parent block, zero it out and set 
                 * the pointer to new one, then send parent to disk.
                 */
-               bh = getblk(inode->i_dev, parent, blocksize);
+               bh = sb_getblk(inode->i_sb, parent);
                lock_buffer(bh);
                memset(bh->b_data, 0, blocksize);
                branch[n].bh = bh;
@@ -763,7 +762,7 @@ static void ext2_free_branches(struct inode *inode, u32 *p, u32 *q, int depth)
                        if (!nr)
                                continue;
                        *p = 0;
-                       bh = bread (inode->i_dev, nr, inode->i_sb->s_blocksize);
+                       bh = sb_bread(inode->i_sb, nr);
                        /*
                         * A read failure? Report error and clear slot
                         * (should be rare).
@@ -921,7 +920,7 @@ void ext2_read_inode (struct inode * inode)
                EXT2_INODE_SIZE(inode->i_sb);
        block = le32_to_cpu(gdp[desc].bg_inode_table) +
                (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
-       if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+       if (!(bh = sb_bread(inode->i_sb, block))) {
                ext2_error (inode->i_sb, "ext2_read_inode",
                            "unable to read inode block - "
                            "inode=%lu, block=%lu", inode->i_ino, block);
@@ -1063,7 +1062,7 @@ static int ext2_update_inode(struct inode * inode, int do_sync)
                EXT2_INODE_SIZE(inode->i_sb);
        block = le32_to_cpu(gdp[desc].bg_inode_table) +
                (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
-       if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+       if (!(bh = sb_bread(inode->i_sb, block))) {
                ext2_error (inode->i_sb, "ext2_write_inode",
                            "unable to read inode block - "
                            "inode=%lu, block=%lu", inode->i_ino, block);
index ee386b01e0ec9115e1b0920e4b6c2f0a1699133c..c9c544458311a6e9b9490441ae3f2ce0190e31fa 100644 (file)
@@ -432,6 +432,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
                printk ("EXT2-fs: unable to set blocksize %d\n", blocksize);
                return NULL;
        }
+       sb->s_blocksize = blocksize;
 
        /*
         * If the superblock doesn't start on a sector boundary,
@@ -443,7 +444,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
                offset = (sb_block*BLOCK_SIZE) % blocksize;
        }
 
-       if (!(bh = bread (dev, logic_sb_block, blocksize))) {
+       if (!(bh = sb_bread(sb, logic_sb_block))) {
                printk ("EXT2-fs: unable to read superblock\n");
                return NULL;
        }
@@ -502,7 +503,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
 
                logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
                offset = (sb_block*BLOCK_SIZE) % blocksize;
-               bh = bread (dev, logic_sb_block, blocksize);
+               bh = sb_bread(sb, logic_sb_block);
                if(!bh) {
                        printk("EXT2-fs: Couldn't read superblock on "
                               "2nd try.\n");
@@ -606,8 +607,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
                goto failed_mount;
        }
        for (i = 0; i < db_count; i++) {
-               sb->u.ext2_sb.s_group_desc[i] = bread (dev, logic_sb_block + i + 1,
-                                                      sb->s_blocksize);
+               sb->u.ext2_sb.s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
                if (!sb->u.ext2_sb.s_group_desc[i]) {
                        for (j = 0; j < i; j++)
                                brelse (sb->u.ext2_sb.s_group_desc[j]);
index db676c0057baa348348f82f7e176d65e417cc37e..f4f87da52c376155550bd78bf4ff0291f98a6c00 100644 (file)
@@ -91,8 +91,7 @@ static int read_block_bitmap (struct super_block * sb,
        if (!gdp)
                goto error_out;
        retval = 0;
-       bh = bread (sb->s_dev,
-                       le32_to_cpu(gdp->bg_block_bitmap), sb->s_blocksize);
+       bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
        if (!bh) {
                ext3_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
@@ -353,8 +352,7 @@ do_more:
 #ifdef CONFIG_JBD_DEBUG
                {
                        struct buffer_head *debug_bh;
-                       debug_bh = get_hash_table(sb->s_dev, block + i,
-                                                       sb->s_blocksize);
+                       debug_bh = sb_get_hash_table(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
@@ -702,7 +700,7 @@ got_block:
                struct buffer_head *debug_bh;
 
                /* Record bitmap buffer state in the newly allocated block */
-               debug_bh = get_hash_table(sb->s_dev, tmp, sb->s_blocksize);
+               debug_bh = sb_get_hash_table(sb, tmp);
                if (debug_bh) {
                        BUFFER_TRACE(debug_bh, "state when allocated");
                        BUFFER_TRACE2(debug_bh, bh, "bitmap state");
index 088f796731679ca9435ecb62aa00abac8705c948..062ed9374aa5a15bded1dae8bc5af7ac6272c98c 100644 (file)
@@ -60,8 +60,7 @@ static int read_inode_bitmap (struct super_block * sb,
                retval = -EIO;
                goto error_out;
        }
-       bh = bread (sb->s_dev,
-                       le32_to_cpu(gdp->bg_inode_bitmap), sb->s_blocksize);
+       bh = sb_bread(sb, le32_to_cpu(gdp->bg_inode_bitmap));
        if (!bh) {
                ext3_error (sb, "read_inode_bitmap",
                            "Cannot read inode bitmap - "
index ca171a4c4d827a8bcd94cc0859b34d95f81ddf48..b3e997fd5c20865629fd7c20875895e4e4c9360a 100644 (file)
@@ -389,8 +389,7 @@ static int ext3_block_to_path(struct inode *inode, long i_block, int offsets[4])
 static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                                 Indirect chain[4], int *err)
 {
-       kdev_t dev = inode->i_dev;
-       int blocksize = inode->i_sb->s_blocksize;
+       struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;
 
@@ -400,7 +399,7 @@ static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
        if (!p->key)
                goto no_block;
        while (--depth) {
-               bh = bread(dev, le32_to_cpu(p->key), blocksize);
+               bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
@@ -558,7 +557,7 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                         * and set the pointer to new one, then send
                         * parent to disk.  
                         */
-                       bh = getblk(inode->i_dev, parent, blocksize);
+                       bh = sb_getblk(inode->i_sb, parent);
                        branch[n].bh = bh;
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
@@ -854,8 +853,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
        *errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
        if (!*errp && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
-               bh = getblk(dummy.b_dev, dummy.b_blocknr,
-                                       inode->i_sb->s_blocksize);
+               bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != 0);
@@ -1549,9 +1547,6 @@ ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
                u32 *first, u32 *last)
 {
        u32 *p;
-       kdev_t dev = inode->i_sb->s_dev;
-       unsigned long blocksize = inode->i_sb->s_blocksize;
-
        if (try_to_extend_transaction(handle, inode)) {
                if (bh) {
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
@@ -1577,7 +1572,7 @@ ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
                        struct buffer_head *bh;
 
                        *p = 0;
-                       bh = get_hash_table(dev, nr, blocksize);
+                       bh = sb_get_hash_table(inode->i_sb, nr);
                        ext3_forget(handle, 0, inode, bh, nr);
                }
        }
@@ -1690,7 +1685,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                                continue;               /* A hole */
 
                        /* Go read the buffer for the next level down */
-                       bh = bread(inode->i_dev, nr, inode->i_sb->s_blocksize);
+                       bh = sb_bread(inode->i_sb, nr);
 
                        /*
                         * A read failure? Report error and clear slot
@@ -2003,7 +1998,7 @@ int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
                EXT3_INODE_SIZE(inode->i_sb);
        block = le32_to_cpu(gdp[desc].bg_inode_table) +
                (offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
-       if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+       if (!(bh = sb_bread(inode->i_sb, block))) {
                ext3_error (inode->i_sb, "ext3_get_inode_loc",
                            "unable to read inode block - "
                            "inode=%lu, block=%lu", inode->i_ino, block);
index 9a5e18950dc10048447a0df4421e733d09ddbdff..d7ebe39243f958bf43509e3ffa0274599a83f5b4 100644 (file)
@@ -925,6 +925,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
                goto out_fail;
        }
 
+       sb->s_blocksize = blocksize;
        set_blocksize (dev, blocksize);
 
        /*
@@ -936,7 +937,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
                offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
        }
 
-       if (!(bh = bread (dev, logic_sb_block, blocksize))) {
+       if (!(bh = sb_bread(sb, logic_sb_block))) {
                printk (KERN_ERR "EXT3-fs: unable to read superblock\n");
                goto out_fail;
        }
@@ -1009,7 +1010,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
                set_blocksize (dev, sb->s_blocksize);
                logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
                offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-               bh = bread (dev, logic_sb_block, blocksize);
+               bh = sb_bread(sb, logic_sb_block);
                if (!bh) {
                        printk(KERN_ERR 
                               "EXT3-fs: Can't read superblock on 2nd try.\n");
@@ -1093,8 +1094,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
                goto failed_mount;
        }
        for (i = 0; i < db_count; i++) {
-               sbi->s_group_desc[i] = bread(dev, logic_sb_block + i + 1,
-                                            blocksize);
+               sbi->s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
                if (!sbi->s_group_desc[i]) {
                        printk (KERN_ERR "EXT3-fs: "
                                "can't read group descriptor %d\n", i);
index d8a4d0bdae1d1ad8bf9e2f0ff4a9f4cbdffb3dec..117d85b33ae7e758b591e7ab2b10ee75a1716d0b 100644 (file)
@@ -59,12 +59,12 @@ void fat_ll_rw_block (
 
 struct buffer_head *default_fat_bread(struct super_block *sb, int block)
 {
-       return bread (sb->s_dev, block, sb->s_blocksize);
+       return sb_bread(sb, block);
 }
 
 struct buffer_head *default_fat_getblk(struct super_block *sb, int block)
 {
-       return getblk (sb->s_dev, block, sb->s_blocksize);
+       return sb_getblk(sb, block);
 }
 
 void default_fat_brelse(struct super_block *sb, struct buffer_head *bh)
index bba65eff5898a91e496c4d56f6f08044b43fc9ed..5bbebb08cc5693895eb277eb9d5fedd77c3fd389 100644 (file)
@@ -584,7 +584,7 @@ fat_read_super(struct super_block *sb, void *data, int silent,
 
        sb->s_blocksize = hard_blksize;
        set_blocksize(sb->s_dev, hard_blksize);
-       bh = bread(sb->s_dev, 0, sb->s_blocksize);
+       bh = sb_bread(sb, 0);
        if (bh == NULL) {
                printk("FAT: unable to read boot sector\n");
                goto out_fail;
@@ -656,7 +656,7 @@ fat_read_super(struct super_block *sb, void *data, int silent,
                        (sbi->fsinfo_sector * logical_sector_size) % hard_blksize;
                fsinfo_bh = bh;
                if (fsinfo_block != 0) {
-                       fsinfo_bh = bread(sb->s_dev, fsinfo_block, hard_blksize);
+                       fsinfo_bh = sb_bread(sb, fsinfo_block);
                        if (fsinfo_bh == NULL) {
                                printk("FAT: bread failed, FSINFO block"
                                       " (blocknr = %d)\n", fsinfo_block);
index 979bb3718b49df8a7360e80fa1d16fe527fa12f3..fb3eeeb9f32d9ca639823c9205519b6316833b3c 100644 (file)
@@ -137,9 +137,8 @@ vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
                struct vxfs_typed       *typ;
                int64_t                 off;
 
-               bp = bread(ip->i_dev,
-                               indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)),
-                               ip->i_sb->s_blocksize);
+               bp = sb_bread(ip->i_sb,
+                               indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)));
                if (!buffer_mapped(bp))
                        return 0;
 
index a06f13f3b4a1ca5b0c56acb9f4f8685c36eed14f..363e1ac597276916c1ec58d48d27b94ebc63cec0 100644 (file)
@@ -104,7 +104,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
 
        block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize);
        offset = ((ino % (sbp->s_blocksize / VXFS_ISIZE)) * VXFS_ISIZE);
-       bp = bread(sbp->s_dev, block, sbp->s_blocksize);
+       bp = sb_bread(sbp, block);
 
        if (buffer_mapped(bp)) {
                struct vxfs_inode_info  *vip;
index 7fe6688ec04be3115508027fbe68dc9fa729e0aa..341d5c1a7f4fdf5ae7d9edf9642f97fe6a3f8a24 100644 (file)
@@ -114,7 +114,7 @@ vxfs_bread(struct inode *ip, int block)
        daddr_t                 pblock;
 
        pblock = vxfs_bmap1(ip, block);
-       bp = bread(ip->i_dev, pblock, ip->i_sb->s_blocksize);
+       bp = sb_bread(ip->i_sb, pblock);
 
        return (bp);
 }
@@ -135,7 +135,7 @@ vxfs_bread(struct inode *ip, int block)
  *   Zero on success, else a negativ error code (-EIO).
  */
 static int
-vxfs_getblk(struct inode *ip, long iblock,
+vxfs_getblk(struct inode *ip, sector_t iblock,
            struct buffer_head *bp, int create)
 {
        daddr_t                 pblock;
index fbfdc1b1239645a2056facf071a818800dec124f..d2043ae5137c1fee7593348ebf5c6161bd3070b2 100644 (file)
@@ -61,7 +61,7 @@ struct inode_operations hfs_file_inode_operations = {
 struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
 {
        int tmp;
-       kdev_t dev = fork->entry->mdb->sys_mdb->s_dev;
+       struct super_block *sb = fork->entry->mdb->sys_mdb;
 
        tmp = hfs_extent_map(fork, block, create);
 
@@ -71,7 +71,7 @@ struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
                */
                if (tmp) {
                        hfs_cat_mark_dirty(fork->entry);
-                       return getblk(dev, tmp, HFS_SECTOR_SIZE);
+                       return sb_getblk(sb, tmp);
                }
                return NULL;
        } else {
@@ -80,8 +80,7 @@ struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
                   we waited on the I/O in getblk to complete.
                */
                do {
-                       struct buffer_head *bh =
-                                       getblk(dev, tmp, HFS_SECTOR_SIZE);
+                       struct buffer_head *bh = sb_getblk(sb, tmp);
                        int tmp2 = hfs_extent_map(fork, block, 0);
 
                        if (tmp2 == tmp) {
@@ -107,7 +106,7 @@ struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
  * block number.  This function just calls hfs_extent_map() to do the
  * real work and then stuffs the appropriate info into the buffer_head.
  */
-int hfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+int hfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
        unsigned long phys;
 
index f266bc0c421357a9300fcd25ca84a8f521c763e6..e328a14aeed7657f8f32367cd5f0408625d47da0 100644 (file)
@@ -495,7 +495,7 @@ extern void hfs_extent_adj(struct hfs_fork *);
 extern void hfs_extent_free(struct hfs_fork *);
 
 /* file.c */
-extern int hfs_get_block(struct inode *, long, struct buffer_head *, int);
+extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
 /* mdb.c */
 extern struct hfs_mdb *hfs_mdb_get(hfs_sysmdb, int, hfs_s32);
index a39710f098f7248cf2f221ef3cfaa79e5556c1c3..6d69f71d34d2353c2be5b6387b0f5df7085839df 100644 (file)
@@ -402,6 +402,8 @@ struct super_block *hfs_read_super(struct super_block *s, void *data,
 
        /* set the device driver to 512-byte blocks */
        set_blocksize(dev, HFS_SECTOR_SIZE);
+       s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
+       s->s_blocksize = HFS_SECTOR_SIZE;
 
 #ifdef CONFIG_MAC_PARTITION
        /* check to see if we're in a partition */
@@ -437,8 +439,6 @@ struct super_block *hfs_read_super(struct super_block *s, void *data,
        }
 
        s->s_magic = HFS_SUPER_MAGIC;
-       s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
-       s->s_blocksize = HFS_SECTOR_SIZE;
        s->s_op = &hfs_super_operations;
 
        /* try to get the root inode */
index 4ce747a495ad2f663da34178f241933e00259909..c96107d5fcab21d1b5f0ecd525f0abc8b45f221a 100644 (file)
@@ -41,9 +41,9 @@ hfs_buffer hfs_buffer_get(hfs_sysmdb sys_mdb, int block, int read) {
        hfs_buffer tmp = HFS_BAD_BUFFER;
 
        if (read) {
-               tmp = bread(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
+               tmp = sb_bread(sys_mdb, block);
        } else {
-               tmp = getblk(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
+               tmp = sb_getblk(sys_mdb, block);
                if (tmp) {
                        mark_buffer_uptodate(tmp, 1);
                }
index c7b63f358c29fe254c19b4f56cb076af79be2144..66067c2a599d7c16fbc6ce7f35acd3c15cce3827 100644 (file)
@@ -122,12 +122,9 @@ void hpfs_unlock_3inodes(struct inode *i1, struct inode *i2, struct inode *i3)
 void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
                 int ahead)
 {
-       kdev_t dev = s->s_dev;
        struct buffer_head *bh;
 
-       if (!ahead || secno + ahead >= s->s_hpfs_fs_size)
-               *bhp = bh = bread(dev, secno, 512);
-       else *bhp = bh = bread(dev, secno, 512);
+       *bhp = bh = sb_bread(s, secno);
        if (bh != NULL)
                return bh->b_data;
        else {
@@ -143,7 +140,7 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
        struct buffer_head *bh;
        /*return hpfs_map_sector(s, secno, bhp, 0);*/
 
-       if ((*bhp = bh = getblk(s->s_dev, secno, 512)) != NULL) {
+       if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
                if (!buffer_uptodate(bh)) wait_on_buffer(bh);
                mark_buffer_uptodate(bh, 1);
                return bh->b_data;
@@ -158,7 +155,6 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
 void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
                   int ahead)
 {
-       kdev_t dev = s->s_dev;
        struct buffer_head *bh;
        char *data;
 
@@ -173,24 +169,22 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
                goto bail;
        }
 
-       if (!ahead || secno + 4 + ahead > s->s_hpfs_fs_size)
-               qbh->bh[0] = bh = bread(dev, secno, 512);
-       else qbh->bh[0] = bh = bread(dev, secno, 512);
+       qbh->bh[0] = bh = sb_bread(s, secno);
        if (!bh)
                goto bail0;
        memcpy(data, bh->b_data, 512);
 
-       qbh->bh[1] = bh = bread(dev, secno + 1, 512);
+       qbh->bh[1] = bh = sb_bread(s, secno + 1);
        if (!bh)
                goto bail1;
        memcpy(data + 512, bh->b_data, 512);
 
-       qbh->bh[2] = bh = bread(dev, secno + 2, 512);
+       qbh->bh[2] = bh = sb_bread(s, secno + 2);
        if (!bh)
                goto bail2;
        memcpy(data + 2 * 512, bh->b_data, 512);
 
-       qbh->bh[3] = bh = bread(dev, secno + 3, 512);
+       qbh->bh[3] = bh = sb_bread(s, secno + 3);
        if (!bh)
                goto bail3;
        memcpy(data + 3 * 512, bh->b_data, 512);
index cefb5b32697704fce6b714daa892094965bd33d5..bce5d5dac34a4275cff90f724429d48492e55201 100644 (file)
@@ -68,7 +68,7 @@ void hpfs_truncate(struct inode *i)
        hpfs_write_inode(i);
 }
 
-int hpfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
        secno s;
        s = hpfs_bmap(inode, iblock);
index e30e6137e143ae7245f8ce2a49396b5e3ca39641..7fa8f7413d541499ee71888522407be6b71a187e 100644 (file)
@@ -259,7 +259,7 @@ int hpfs_open(struct inode *, struct file *);
 int hpfs_file_fsync(struct file *, struct dentry *, int);
 secno hpfs_bmap(struct inode *, unsigned);
 void hpfs_truncate(struct inode *);
-int hpfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create);
+int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create);
 ssize_t hpfs_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos);
 
 /* inode.c */
index 48a358de8417fbb82069291c7a5f4e371bb507b5..29db6b6c01ee0eb01146ebf7eec0913e8aecb5bd 100644 (file)
@@ -123,7 +123,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
                int de_len;
 
                if (!bh) {
-                       bh = isofs_bread(inode, bufsize, block);
+                       bh = isofs_bread(inode, block);
                        if (!bh)
                                return 0;
                }
@@ -158,7 +158,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
                        brelse(bh);
                        bh = NULL;
                        if (offset) {
-                               bh = isofs_bread(inode, bufsize, block);
+                               bh = isofs_bread(inode, block);
                                if (!bh)
                                        return 0;
                                memcpy((void *) tmpde + slop, bh->b_data, offset);
index 305bf8fae044184e094ed82f5b215c329c5b089b..cbf4cabc3989296f80fc68b8c9778c1ed563d960 100644 (file)
@@ -527,6 +527,7 @@ static struct super_block *isofs_read_super(struct super_block *s, void *data,
        }
 
        set_blocksize(dev, opt.blocksize);
+       s->s_blocksize = opt.blocksize;
 
        s->u.isofs_sb.s_high_sierra = high_sierra = 0; /* default is iso9660 */
 
@@ -540,8 +541,8 @@ static struct super_block *isofs_read_super(struct super_block *s, void *data,
            struct iso_volume_descriptor  * vdp;
 
            block = iso_blknum << (ISOFS_BLOCK_BITS-blocksize_bits);
-           if (!(bh = bread(dev, block, opt.blocksize)))
-               goto out_no_read;               
+           if (!(bh = sb_bread(s, block)))
+               goto out_no_read;
 
            vdp = (struct iso_volume_descriptor *)bh->b_data;
            hdp = (struct hs_volume_descriptor *)bh->b_data;
@@ -896,7 +897,6 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
        unsigned int firstext;
        unsigned long nextino;
        int section, rv;
-       unsigned int blocksize = inode->i_sb->s_blocksize;
 
        lock_kernel();
 
@@ -957,7 +957,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
                        (*bh_result)->b_blocknr  = firstext + b_off - offset;
                        (*bh_result)->b_state   |= (1UL << BH_Mapped);
                } else {
-                       *bh_result = getblk(inode->i_dev, firstext+b_off-offset, blocksize);
+                       *bh_result = sb_getblk(inode->i_sb, firstext+b_off-offset);
                        if ( !*bh_result )
                                goto abort;
                }
@@ -1000,12 +1000,12 @@ static int isofs_bmap(struct inode *inode, int block)
        return 0;
 }
 
-struct buffer_head *isofs_bread(struct inode *inode, unsigned int bufsize, unsigned int block)
+struct buffer_head *isofs_bread(struct inode *inode, unsigned int block)
 {
        unsigned int blknr = isofs_bmap(inode, block);
        if (!blknr)
                return NULL;
-       return bread(inode->i_dev, blknr, bufsize);
+       return sb_bread(inode->i_sb, blknr);
 }
 
 static int isofs_readpage(struct file *file, struct page *page)
@@ -1060,7 +1060,7 @@ static int isofs_read_level3_size(struct inode * inode)
                unsigned int de_len;
 
                if (!bh) {
-                       bh = bread(inode->i_dev, block, bufsize);
+                       bh = sb_bread(inode->i_sb, block);
                        if (!bh)
                                goto out_noread;
                }
@@ -1092,7 +1092,7 @@ static int isofs_read_level3_size(struct inode * inode)
                        brelse(bh);
                        bh = NULL;
                        if (offset) {
-                               bh = bread(inode->i_dev, block, bufsize);
+                               bh = sb_bread(inode->i_sb, block);
                                if (!bh)
                                        goto out_noread;
                                memcpy((void *) tmpde + slop, bh->b_data, offset);
@@ -1150,7 +1150,7 @@ static void isofs_read_inode(struct inode * inode)
        unsigned long offset;
        int volume_seq_no, i;
 
-       bh = bread(inode->i_dev, block, bufsize);
+       bh = sb_bread(inode->i_sb, block);
        if (!bh)
                goto out_badread;
 
@@ -1168,7 +1168,7 @@ static void isofs_read_inode(struct inode * inode)
                }
                memcpy(tmpde, bh->b_data + offset, frag1);
                brelse(bh);
-               bh = bread(inode->i_dev, ++block, bufsize);
+               bh = sb_bread(inode->i_sb, ++block);
                if (!bh)
                        goto out_badread;
                memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1);
@@ -1345,7 +1345,7 @@ static void isofs_read_inode(struct inode * inode)
 #ifdef LEAK_CHECK
 #undef malloc
 #undef free_s
-#undef bread
+#undef sb_bread
 #undef brelse
 
 void * leak_check_malloc(unsigned int size){
@@ -1360,9 +1360,9 @@ void leak_check_free_s(void * obj, int size){
   return kfree(obj);
 }
 
-struct buffer_head * leak_check_bread(int dev, int block, int size){
+struct buffer_head * leak_check_bread(struct super_block *sb, int block){
   check_bread++;
-  return bread(dev, block, size);
+  return sb_bread(sb, block);
 }
 
 void leak_check_brelse(struct buffer_head * bh){
index 87fe121f8bbc9ffd437d6d5624aef2f862557618..13e79d4d5193c0aa7dc87f8a228c6ebad27ebdf6 100644 (file)
@@ -78,7 +78,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
                char *dpnt;
 
                if (!bh) {
-                       bh = isofs_bread(dir, bufsize, block);
+                       bh = isofs_bread(dir, block);
                        if (!bh)
                                return 0;
                }
@@ -108,7 +108,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
                        brelse(bh);
                        bh = NULL;
                        if (offset) {
-                               bh = isofs_bread(dir, bufsize, block);
+                               bh = isofs_bread(dir, block);
                                if (!bh)
                                        return 0;
                                memcpy((void *) tmpde + slop, bh->b_data, offset);
index ee1413f7b5269853cdce548ab6da4ee4a826fafb..6906a8ebac09584ed7f640bbd3785e180aa7c972 100644 (file)
@@ -69,7 +69,7 @@
     block = cont_extent; \
     offset = cont_offset; \
     offset1 = 0; \
-    pbh = bread(DEV->i_dev, block, ISOFS_BUFFER_SIZE(DEV)); \
+    pbh = sb_bread(DEV->i_sb, block); \
     if(pbh){       \
       memcpy(buffer + offset1, pbh->b_data + offset, cont_size - offset1); \
       brelse(pbh); \
@@ -511,7 +511,7 @@ static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
 
        block = inode->i_ino >> bufbits;
        lock_kernel();
-       bh = bread(inode->i_dev, block, bufsize);
+       bh = sb_bread(inode->i_sb, block);
        if (!bh)
                goto out_noread;
 
index 491a7fd5d6b2a4db39df7044fe0c19b38ef9582a..6347bb16996d10b1ff061e2f612ac0a1502747e0 100644 (file)
@@ -133,7 +133,7 @@ minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
        ino--;
        block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
                 ino / MINIX_INODES_PER_BLOCK;
-       *bh = bread(sb->s_dev, block, BLOCK_SIZE);
+       *bh = sb_bread(sb, block);
        if (!*bh) {
                printk("unable to read i-node block\n");
                return NULL;
@@ -158,7 +158,7 @@ minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
        ino--;
        block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
                 ino / MINIX2_INODES_PER_BLOCK;
-       *bh = bread(sb->s_dev, block, BLOCK_SIZE);
+       *bh = sb_bread(sb, block);
        if (!*bh) {
                printk("unable to read i-node block\n");
                return NULL;
index fe53b49917ba04ef60a3442099011a28fbd0fee7..5525a48a9164da0be516ed8e155c1b335fd2cc63 100644 (file)
@@ -143,15 +143,15 @@ static struct super_block *minix_read_super(struct super_block *s, void *data,
                goto out_bad_hblock;
 
        set_blocksize(dev, BLOCK_SIZE);
-       if (!(bh = bread(dev,1,BLOCK_SIZE)))
+       s->s_blocksize = BLOCK_SIZE;
+       s->s_blocksize_bits = BLOCK_SIZE_BITS;
+       if (!(bh = sb_bread(s, 1)))
                goto out_bad_sb;
 
        ms = (struct minix_super_block *) bh->b_data;
        sbi->s_ms = ms;
        sbi->s_sbh = bh;
        sbi->s_mount_state = ms->s_state;
-       s->s_blocksize = BLOCK_SIZE;
-       s->s_blocksize_bits = BLOCK_SIZE_BITS;
        sbi->s_ninodes = ms->s_ninodes;
        sbi->s_nzones = ms->s_nzones;
        sbi->s_imap_blocks = ms->s_imap_blocks;
@@ -198,12 +198,12 @@ static struct super_block *minix_read_super(struct super_block *s, void *data,
 
        block=2;
        for (i=0 ; i < sbi->s_imap_blocks ; i++) {
-               if (!(sbi->s_imap[i]=bread(dev,block,BLOCK_SIZE)))
+               if (!(sbi->s_imap[i]=sb_bread(s, block)))
                        goto out_no_bitmap;
                block++;
        }
        for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
-               if (!(sbi->s_zmap[i]=bread(dev,block,BLOCK_SIZE)))
+               if (!(sbi->s_zmap[i]=sb_bread(s, block)))
                        goto out_no_bitmap;
                block++;
        }
index 0aee59b47f3042c124658c0d84113c4b7fc55827..373df9896364fed34c5c013818c9bb5cef2f9366 100644 (file)
@@ -30,7 +30,7 @@ static inline Indirect *get_branch(struct inode *inode,
                                        Indirect chain[DEPTH],
                                        int *err)
 {
-       kdev_t dev = inode->i_dev;
+       struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;
 
@@ -40,7 +40,7 @@ static inline Indirect *get_branch(struct inode *inode,
        if (!p->key)
                goto no_block;
        while (--depth) {
-               bh = bread(dev, block_to_cpu(p->key), BLOCK_SIZE);
+               bh = sb_bread(sb, block_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
@@ -79,7 +79,7 @@ static int alloc_branch(struct inode *inode,
                if (!nr)
                        break;
                branch[n].key = cpu_to_block(nr);
-               bh = getblk(inode->i_dev, parent, BLOCK_SIZE);
+               bh = sb_getblk(inode->i_sb, parent);
                lock_buffer(bh);
                memset(bh->b_data, 0, BLOCK_SIZE);
                branch[n].bh = bh;
@@ -277,7 +277,7 @@ static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth
                        if (!nr)
                                continue;
                        *p = 0;
-                       bh = bread (inode->i_dev, nr, BLOCK_SIZE);
+                       bh = sb_bread(inode->i_sb, nr);
                        if (!bh)
                                continue;
                        free_branches(inode, (block_t*)bh->b_data,
index d790be367296192c5cd72076b5f60bd029a553ce..bbe12582a7f4ea9a58fdc49076cdb8147a678b7d 100644 (file)
@@ -517,9 +517,11 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
 
        if (mnt) {
                err = graft_tree(mnt, nd);
-               if (err)
+               if (err) {
+                       spin_lock(&dcache_lock);
                        umount_tree(mnt);
-               else
+                       spin_unlock(&dcache_lock);
+               } else
                        mntput(mnt);
        }
 
index 58f543cd6939b35609d9e878d7a4070f21b13254..b72f9100c8556b751cee707ff15333a500df959b 100644 (file)
@@ -52,14 +52,6 @@ static void ncp_add_mem(struct ncp_server *server, const void *source, int size)
        return;
 }
 
-static void ncp_add_mem_fromfs(struct ncp_server *server, const char *source, int size)
-{
-       assert_server_locked(server);
-       copy_from_user(&(server->packet[server->current_size]), source, size);
-       server->current_size += size;
-       return;
-}
-
 static void ncp_add_pstring(struct ncp_server *server, const char *s)
 {
        int len = strlen(s);
index 4533c63dc9100a5b0153d96acc924be84115681e..5076c1dac74239dc7f7294d26b9a67c18f075e6e 100644 (file)
@@ -1023,8 +1023,9 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
                ntfs_error("Unable to set blocksize %d.\n", blocksize);
                goto ntfs_read_super_vol;
        }
+       sb->s_blocksize = blocksize;
        /* Read the super block (boot block). */
-       if (!(bh = bread(sb->s_dev, 0, blocksize))) {
+       if (!(bh = sb_bread(sb, 0))) {
                ntfs_error("Reading super block failed\n");
                goto ntfs_read_super_unl;
        }
@@ -1071,8 +1072,7 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
        if (to_read < 1)
                to_read = 1;
        for (i = 0; i < to_read; i++) {
-               if (!(bh = bread(sb->s_dev, vol->mft_lcn + i,
-                                                         vol->cluster_size))) {
+               if (!(bh = sb_bread(sb, vol->mft_lcn + i))) {
                        ntfs_error("Could not read $Mft record 0\n");
                        goto ntfs_read_super_mft;
                }
index 2f290b4de09c5bd2097478230ace8deb2069325a..d490f2553c63955f42eeda5a0905a699108ddc38 100644 (file)
@@ -169,7 +169,7 @@ int ntfs_getput_clusters(ntfs_volume *vol, int cluster, ntfs_size_t start_offs,
                   buf->do_read ? "get" : "put", cluster, start_offs, length);
        to_copy = vol->cluster_size - start_offs;
        while (length) {
-               if (!(bh = bread(sb->s_dev, cluster, vol->cluster_size))) {
+               if (!(bh = sb_bread(sb, cluster))) {
                        ntfs_debug(DEBUG_OTHER, "%s failed\n",
                                   buf->do_read ? "Reading" : "Writing");
                        error = -EIO;
index 09b94399aaab02ffd37e7aaf25488f0644cd8496..f544a436c822f374d56b3e33d4d204e62bc70cdf 100644 (file)
@@ -69,7 +69,7 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb)
        struct buffer_head *bh;
 
        while (total < size) {
-               if ((bh = bread(sb->s_dev, start + offset, QNX4_BLOCK_SIZE)) == NULL) {
+               if ((bh = sb_bread(sb, start + offset)) == NULL) {
                        printk("qnx4: I/O error in counting free blocks\n");
                        break;
                }
@@ -96,7 +96,7 @@ int qnx4_is_free(struct super_block *sb, long block)
        QNX4DEBUG(("qnx4: is_free requesting block [%lu], bitmap in block [%lu]\n",
                   (unsigned long) block, (unsigned long) start));
        (void) size;            /* CHECKME */
-       bh = bread(sb->s_dev, start, QNX4_BLOCK_SIZE);
+       bh = sb_bread(sb, start);
        if (bh == NULL) {
                return -EIO;
        }
@@ -124,7 +124,7 @@ int qnx4_set_bitmap(struct super_block *sb, long block, int busy)
        QNX4DEBUG(("qnx4: set_bitmap requesting block [%lu], bitmap in block [%lu]\n",
                   (unsigned long) block, (unsigned long) start));
        (void) size;            /* CHECKME */
-       bh = bread(sb->s_dev, start, QNX4_BLOCK_SIZE);
+       bh = sb_bread(sb, start);
        if (bh == NULL) {
                return -EIO;
        }
index ac5d09b91477efb4e038fb01e9c18a76f5de10d2..49df47d8f3ee8849fa2a6776b0fa7dd87a594678 100644 (file)
@@ -36,7 +36,7 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
        while (filp->f_pos < inode->i_size) {
                blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
-               bh = bread(inode->i_dev, blknum, QNX4_BLOCK_SIZE);
+               bh = sb_bread(inode->i_sb, blknum);
                if(bh==NULL) {
                        printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum);
                        break;
index 26c8d77920141283e0c09dc671825e70c78a9811..4ef5de9554e16fb20867d39931e147632178b53b 100644 (file)
@@ -24,8 +24,6 @@
 #include <asm/segment.h>
 #include <asm/system.h>
 
-#define blocksize QNX4_BLOCK_SIZE
-
 /*
  * The functions for qnx4 fs file synchronization.
  */
@@ -40,7 +38,7 @@ static int sync_block(struct inode *inode, unsigned short *block, int wait)
        if (!*block)
                return 0;
        tmp = *block;
-       bh = get_hash_table(inode->i_dev, *block, blocksize);
+       bh = sb_get_hash_table(inode->i_sb, *block);
        if (!bh)
                return 0;
        if (*block != tmp) {
@@ -74,7 +72,7 @@ static int sync_iblock(struct inode *inode, unsigned short *iblock,
        rc = sync_block(inode, iblock, wait);
        if (rc)
                return rc;
-       *bh = bread(inode->i_dev, tmp, blocksize);
+       *bh = sb_bread(inode->i_sb, tmp);
        if (tmp != *iblock) {
                brelse(*bh);
                *bh = NULL;
index 75ad8a8f5d03e9073f4ccd9bd5df03b99a68a712..cfec8ede9c85e6ca14fd06b9940edd270bd5a225 100644 (file)
@@ -95,7 +95,7 @@ static void qnx4_write_inode(struct inode *inode, int unused)
        QNX4DEBUG(("qnx4: write inode 2.\n"));
        block = ino / QNX4_INODES_PER_BLOCK;
        lock_kernel();
-       if (!(bh = bread(inode->i_dev, block, QNX4_BLOCK_SIZE))) {
+       if (!(bh = sb_bread(inode->i_sb, block))) {
                printk("qnx4: major problem: unable to read inode from dev "
                       "%s\n", kdevname(inode->i_dev));
                unlock_kernel();
@@ -162,7 +162,7 @@ struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
        if ( nr >= 0 )
                nr = qnx4_block_map( inode, nr );
        if (nr) {
-               result = getblk(inode->i_dev, nr, QNX4_BLOCK_SIZE);
+               result = sb_getblk(inode->i_sb, nr);
                return result;
        }
        if (!create) {
@@ -173,7 +173,7 @@ struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
        if (!tmp) {
                return NULL;
        }
-       result = getblk(inode->i_dev, tmp, QNX4_BLOCK_SIZE);
+       result = sb_getblk(inode->i_sb, tmp);
        if (tst) {
                qnx4_free_block(inode->i_sb, tmp);
                brelse(result);
@@ -204,7 +204,7 @@ struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
        return NULL;
 }
 
-int qnx4_get_block( struct inode *inode, long iblock, struct buffer_head *bh, int create )
+int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
 {
        unsigned long phys;
 
@@ -243,7 +243,7 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
                while ( --nxtnt > 0 ) {
                        if ( ix == 0 ) {
                                // read next xtnt block.
-                               bh = bread( inode->i_dev, i_xblk - 1, QNX4_BLOCK_SIZE );
+                               bh = sb_bread(inode->i_sb, i_xblk - 1);
                                if ( !bh ) {
                                        QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
                                        return -EIO;
@@ -307,7 +307,7 @@ static const char *qnx4_checkroot(struct super_block *sb)
                rd = le32_to_cpu(sb->u.qnx4_sb.sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
                rl = le32_to_cpu(sb->u.qnx4_sb.sb->RootDir.di_first_xtnt.xtnt_size);
                for (j = 0; j < rl; j++) {
-                       bh = bread(sb->s_dev, rd + j, QNX4_BLOCK_SIZE); /* root dir, first block */
+                       bh = sb_bread(sb, rd + j);      /* root dir, first block */
                        if (bh == NULL) {
                                return "unable to read root entry.";
                        }
@@ -350,7 +350,7 @@ static struct super_block *qnx4_read_super(struct super_block *s,
        /* Check the boot signature. Since the qnx4 code is
           dangerous, we should leave as quickly as possible
           if we don't belong here... */
-       bh = bread(dev, 0, QNX4_BLOCK_SIZE);
+       bh = sb_bread(s, 0);
        if (!bh) {
                printk("qnx4: unable to read the boot sector\n");
                goto outnobh;
@@ -362,7 +362,7 @@ static struct super_block *qnx4_read_super(struct super_block *s,
        }
        brelse(bh);
 
-       bh = bread(dev, 1, QNX4_BLOCK_SIZE);
+       bh = sb_bread(s, 1);
        if (!bh) {
                printk("qnx4: unable to read the superblock\n");
                goto outnobh;
@@ -457,7 +457,7 @@ static void qnx4_read_inode(struct inode *inode)
        }
        block = ino / QNX4_INODES_PER_BLOCK;
 
-       if (!(bh = bread(inode->i_dev, block, QNX4_BLOCK_SIZE))) {
+       if (!(bh = sb_bread(inode->i_sb, block))) {
                printk("qnx4: major problem: unable to read inode from dev "
                       "%s\n", kdevname(inode->i_dev));
                return;
index 4c61dfdeab8ab73f291246b61f6438c30eb60626..5f800a83b41b0716e85f451d92de4527d0622806 100644 (file)
@@ -920,7 +920,7 @@ static int  is_left_neighbor_in_cache(
   /* Get left neighbor block number. */
   n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
   /* Look for the left neighbor in the cache. */
-  if ( (left = get_hash_table(p_s_sb->s_dev, n_left_neighbor_blocknr, p_s_sb->s_blocksize)) ) {
+  if ( (left = sb_get_hash_table(p_s_sb, n_left_neighbor_blocknr)) ) {
 
     RFALSE( buffer_uptodate (left) && ! B_IS_IN_TREE(left),
            "vs-8170: left neighbor (%b %z) is not in the tree", left, left);
index 42148504961f3c6b43e4afaca472207ef8f83e99..65ac678891302baa9ae8488810f4cfcfb91050de 100644 (file)
@@ -1963,7 +1963,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
 //
 // this is exactly what 2.3.99-pre9's ext2_bmap is
 //
-static int reiserfs_aop_bmap(struct address_space *as, sector_t block) {
+static int reiserfs_aop_bmap(struct address_space *as, long block) {
   return generic_block_bmap(as, block, reiserfs_bmap) ;
 }
 
index 2c71fd471adfe1fcf51cee78ddb05461a8e4060b..3b70c989bd708ee322560f618e641eb086e5d479 100644 (file)
@@ -683,7 +683,7 @@ retry:
   count = 0 ;
   for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) {  /* everything but commit_bh */
     bn = reiserfs_get_journal_block(s) + (jl->j_start+i) % JOURNAL_BLOCK_COUNT;
-    tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+    tbh = sb_get_hash_table(s, bn) ;
 
 /* kill this sanity check */
 if (count > (orig_commit_left + 2)) {
@@ -712,7 +712,7 @@ reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_l
     for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && 
                  i < (jl->j_len + 1) ; i++) {  /* everything but commit_bh */
       bn = reiserfs_get_journal_block(s) + (jl->j_start + i) % JOURNAL_BLOCK_COUNT  ;
-      tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+      tbh = sb_get_hash_table(s, bn) ;
 
       wait_on_buffer(tbh) ;
       if (!buffer_uptodate(tbh)) {
@@ -1403,8 +1403,7 @@ static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffe
     offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
 
     /* ok, we have a journal description block, lets see if the transaction was valid */
-    c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT), 
-               p_s_sb->s_blocksize) ;
+    c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
     if (!c_bh)
       return 0 ;
     commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
@@ -1458,7 +1457,7 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
   unsigned long trans_offset ;
   int i;
 
-  d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+  d_bh = sb_bread(p_s_sb, cur_dblock) ;
   if (!d_bh)
     return 1 ;
   desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
@@ -1482,8 +1481,7 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
     brelse(d_bh) ;
     return 1 ;
   }
-  c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT), 
-               p_s_sb->s_blocksize) ;
+  c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
   if (!c_bh) {
     brelse(d_bh) ;
     return 1 ;
@@ -1512,11 +1510,11 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
   }
   /* get all the buffer heads */
   for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
-    log_blocks[i] = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT, p_s_sb->s_blocksize);
+    log_blocks[i] = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT);
     if (i < JOURNAL_TRANS_HALF) {
-      real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(desc->j_realblock[i]), p_s_sb->s_blocksize) ;
+      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
     } else {
-      real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF]), p_s_sb->s_blocksize) ;
+      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF])) ;
     }
     if (real_blocks[i]->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
         real_blocks[i]->b_blocknr < (reiserfs_get_journal_block(p_s_sb)+JOURNAL_BLOCK_COUNT)) {
@@ -1617,10 +1615,9 @@ static int journal_read(struct super_block *p_s_sb) {
   ** is the first unflushed, and if that transaction is not valid, 
   ** replay is done
   */
-  SB_JOURNAL(p_s_sb)->j_header_bh = bread(p_s_sb->s_dev
+  SB_JOURNAL(p_s_sb)->j_header_bh = sb_bread(p_s_sb
                                           reiserfs_get_journal_block(p_s_sb) + 
-                                         JOURNAL_BLOCK_COUNT, 
-                                         p_s_sb->s_blocksize) ;
+                                         JOURNAL_BLOCK_COUNT) ;
   if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
     return 1 ;
   }
@@ -1641,7 +1638,7 @@ static int journal_read(struct super_block *p_s_sb) {
     ** there is nothing more we can do, and it makes no sense to read 
     ** through the whole log.
     */
-    d_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset), p_s_sb->s_blocksize) ;
+    d_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
     ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
     if (!ret) {
       continue_replay = 0 ;
@@ -1661,7 +1658,7 @@ static int journal_read(struct super_block *p_s_sb) {
   ** all the valid transactions, and pick out the oldest.
   */
   while(continue_replay && cur_dblock < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
-    d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+    d_bh = sb_bread(p_s_sb, cur_dblock) ;
     ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
     if (ret == 1) {
       desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
@@ -2553,7 +2550,7 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_bloc
   int cleaned = 0 ;
   
   if (reiserfs_dont_log(th->t_super)) {
-    bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+    bh = sb_get_hash_table(p_s_sb, blocknr) ;
     if (bh && buffer_dirty (bh)) {
       printk ("journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr);
       BUG ();
@@ -2561,7 +2558,7 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_bloc
     brelse (bh);
     return 0 ;
   }
-  bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+  bh = sb_get_hash_table(p_s_sb, blocknr) ;
   /* if it is journal new, we just remove it from this transaction */
   if (bh && buffer_journal_new(bh)) {
     mark_buffer_notjournal_new(bh) ;
@@ -2768,7 +2765,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
   
   rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
   /* setup description block */
-  d_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start, p_s_sb->s_blocksize) ; 
+  d_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ; 
   mark_buffer_uptodate(d_bh, 1) ;
   desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
   memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
@@ -2776,9 +2773,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
   desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
 
   /* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
-  c_bh = getblk(p_s_sb->s_dev,  reiserfs_get_journal_block(p_s_sb) + 
-                                       ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT), 
-                                        p_s_sb->s_blocksize) ;
+  c_bh = sb_getblk(p_s_sb,  reiserfs_get_journal_block(p_s_sb) + 
+                                       ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT)) ;
   commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
   memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
   commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
@@ -2866,9 +2862,8 @@ printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
     /* copy all the real blocks into log area.  dirty log blocks */
     if (test_bit(BH_JDirty, &cn->bh->b_state)) {
       struct buffer_head *tmp_bh ;
-      tmp_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + 
-                    ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT), 
-                                      p_s_sb->s_blocksize) ;
+      tmp_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + 
+                    ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT)) ;
       mark_buffer_uptodate(tmp_bh, 1) ;
       memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;  
       jindex++ ;
index 9fed2136995aab92e4b57dda64cd1a846cf4fcde..ab7a31036afcbcb610626f126d384a94e3c99e6a 100644 (file)
@@ -39,7 +39,7 @@ int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
        }
 
        /* check the device size */
-       bh = bread(s->s_dev, block_count_new - 1, s->s_blocksize);
+       bh = sb_bread(s, block_count_new - 1);
        if (!bh) {
                printk("reiserfs_resize: can\'t read last block\n");
                return -EINVAL;
index 993e6fe1f3da792007015e2b6f2a7b0693d41803..327bd73117ca2ddef73a67a2cbfe084c6a882b3b 100644 (file)
@@ -1116,7 +1116,7 @@ static char  prepare_for_delete_or_cut(
                    continue;
                }
                /* Search for the buffer in cache. */
-               p_s_un_bh = get_hash_table(p_s_sb->s_dev, get_block_num(p_n_unfm_pointer,0), n_blk_size);
+               p_s_un_bh = sb_get_hash_table(p_s_sb, get_block_num(p_n_unfm_pointer,0));
 
                if (p_s_un_bh) {
                    mark_buffer_clean(p_s_un_bh) ;
index 477d2eb6872b9b49ff3106e597e9915be8a9ab00..9c791cbce08465a1a2663667989530b3f97786e7 100644 (file)
@@ -108,7 +108,7 @@ romfs_read_super(struct super_block *s, void *data, int silent)
        s->u.generic_sbp = (void *) 0;
        s->s_maxbytes = 0xFFFFFFFF;
 
-       bh = bread(dev, 0, ROMBSIZE);
+       bh = sb_bread(s, 0);
        if (!bh) {
                /* XXX merge with other printk? */
                 printk ("romfs: unable to read superblock\n");
@@ -188,7 +188,7 @@ romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count)
        if (count > maxsize || offset+count > maxsize)
                count = maxsize-offset;
 
-       bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+       bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
        if (!bh)
                return -1;              /* error */
 
@@ -203,7 +203,7 @@ romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count)
        while (res < count) {
                offset += maxsize;
 
-               bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+               bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
                if (!bh)
                        return -1;
                maxsize = min_t(unsigned long, count - res, ROMBSIZE);
@@ -226,7 +226,7 @@ romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long
        if (offset >= maxsize || count > maxsize || offset+count>maxsize)
                return -1;
 
-       bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+       bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
        if (!bh)
                return -1;              /* error */
 
@@ -241,7 +241,7 @@ romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long
                offset += maxsize;
                dest += maxsize;
 
-               bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+               bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
                if (!bh)
                        return -1;
                maxsize = min_t(unsigned long, count - res, ROMBSIZE);
index 1d76bb94148ffb333f0b4fdb84712daacfe281b3..2f3df11dd2079ce02384ae6e43b2ef6cde79d033 100644 (file)
@@ -73,7 +73,7 @@ void sysv_free_block(struct super_block * sb, u32 nr)
         */
        if (count == sb->sv_flc_size || count == 0) {
                block += sb->sv_block_base;
-               bh = getblk(sb->s_dev, block, sb->s_blocksize);
+               bh = sb_getblk(sb, block);
                if (!bh) {
                        printk("sysv_free_block: getblk() failed\n");
                        unlock_super(sb);
@@ -125,7 +125,7 @@ u32 sysv_new_block(struct super_block * sb)
                unsigned count;
 
                block += sb->sv_block_base;
-               if (!(bh = bread(sb->s_dev, block, sb->s_blocksize))) {
+               if (!(bh = sb_bread(sb, block))) {
                        printk("sysv_new_block: cannot read free-list block\n");
                        /* retry this same block next time */
                        *sb->sv_bcache_count = cpu_to_fs16(sb, 1);
@@ -196,7 +196,7 @@ unsigned long sysv_count_free_blocks(struct super_block * sb)
                if (block < sb->sv_firstdatazone || block >= sb->sv_nzones)
                        goto Einval;
                block += sb->sv_block_base;
-               bh = bread(sb->s_dev, block, sb->s_blocksize);
+               bh = sb_bread(sb, block);
                if (!bh)
                        goto Eio;
                n = fs16_to_cpu(sb, *(u16*)bh->b_data);
index a91224c578e5f0e6355d79765159a3d56a864fb8..474e67ec6501b4389f1fb966d915a69551ef10cc 100644 (file)
@@ -55,7 +55,7 @@ sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh)
        struct sysv_inode *res;
        int block = sb->sv_firstinodezone + sb->sv_block_base;
        block += (ino-1) >> sb->sv_inodes_per_block_bits;
-       *bh = bread(sb->s_dev, block, sb->s_blocksize);
+       *bh = sb_bread(sb, block);
        if (!*bh)
                return NULL;
        res = (struct sysv_inode *) (*bh)->b_data;
index af27d22104eeb4cf6ad1bf499d313b29deec75af..5d4ef29793a81d5c60eaf81f787a27b4d6116c46 100644 (file)
@@ -86,8 +86,7 @@ static Indirect *get_branch(struct inode *inode,
                            Indirect chain[],
                            int *err)
 {
-       kdev_t dev = inode->i_dev;
-       int size = inode->i_sb->s_blocksize;
+       struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;
 
@@ -96,8 +95,8 @@ static Indirect *get_branch(struct inode *inode,
        if (!p->key)
                goto no_block;
        while (--depth) {
-               int block = block_to_cpu(inode->i_sb, p->key);
-               bh = bread(dev, block, size);
+               int block = block_to_cpu(sb, p->key);
+               bh = sb_bread(sb, block);
                if (!bh)
                        goto failure;
                if (!verify_chain(chain, p))
@@ -139,7 +138,7 @@ static int alloc_branch(struct inode *inode,
                 * the pointer to new one, then send parent to disk.
                 */
                parent = block_to_cpu(inode->i_sb, branch[n-1].key);
-               bh = getblk(inode->i_dev, parent, blocksize);
+               bh = sb_getblk(inode->i_sb, parent);
                lock_buffer(bh);
                memset(bh->b_data, 0, blocksize);
                branch[n].bh = bh;
@@ -192,7 +191,7 @@ changed:
        return -EAGAIN;
 }
 
-static int get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
        int err = -EIO;
        int offsets[DEPTH];
@@ -336,7 +335,7 @@ static void free_branches(struct inode *inode, u32 *p, u32 *q, int depth)
                                continue;
                        *p = 0;
                        block = block_to_cpu(sb, nr);
-                       bh = bread(inode->i_dev, block, sb->s_blocksize);
+                       bh = sb_bread(sb, block);
                        if (!bh)
                                continue;
                        free_branches(inode, (u32*)bh->b_data,
index 983f0be5b925ce512e1410b182e919df31a176ea..2a38452d7f1a6159683275fabacb1675414decc3 100644 (file)
@@ -362,11 +362,12 @@ static struct super_block *sysv_read_super(struct super_block *sb,
        if (64 != sizeof (struct sysv_inode))
                panic("sysv fs: bad i-node size");
        set_blocksize(dev,BLOCK_SIZE);
+       sb->s_blocksize = BLOCK_SIZE;
        sb->sv_block_base = 0;
 
        for (i = 0; i < sizeof(flavours)/sizeof(flavours[0]) && !size; i++) {
                brelse(bh);
-               bh = bread(dev, flavours[i].block, BLOCK_SIZE);
+               bh = sb_bread(sb, flavours[i].block);
                if (!bh)
                        continue;
                size = flavours[i].test(sb, bh);
@@ -380,8 +381,9 @@ static struct super_block *sysv_read_super(struct super_block *sb,
                        blocknr = bh->b_blocknr << 1;
                        brelse(bh);
                        set_blocksize(dev, 512);
-                       bh1 = bread(dev, blocknr, 512);
-                       bh = bread(dev, blocknr + 1, 512);
+                       sb->s_blocksize = 512;
+                       bh1 = sb_bread(sb, blocknr);
+                       bh = sb_bread(sb, blocknr + 1);
                        break;
                case 2:
                        bh1 = bh;
@@ -390,7 +392,8 @@ static struct super_block *sysv_read_super(struct super_block *sb,
                        blocknr = bh->b_blocknr >> 1;
                        brelse(bh);
                        set_blocksize(dev, 2048);
-                       bh1 = bh = bread(dev, blocknr, 2048);
+                       sb->s_blocksize = 2048;
+                       bh1 = bh = sb_bread(sb, blocknr);
                        break;
                default:
                        goto Ebadsize;
@@ -441,8 +444,9 @@ static struct super_block *v7_read_super(struct super_block *sb,void *data,
        sb->sv_bytesex = BYTESEX_PDP;
 
        set_blocksize(dev, 512);
+       sb->s_blocksize = 512;
 
-       if ((bh = bread(dev, 1, 512)) == NULL) {
+       if ((bh = sb_bread(sb, 1)) == NULL) {
                if (!silent)
                        printk("VFS: unable to read V7 FS superblock on "
                               "device %s.\n", bdevname(dev));
@@ -458,7 +462,7 @@ static struct super_block *v7_read_super(struct super_block *sb,void *data,
 
        /* plausibility check on root inode: it is a directory,
           with a nonzero size that is a multiple of 16 */
-       if ((bh2 = bread(dev, 2, 512)) == NULL)
+       if ((bh2 = sb_bread(sb, 2)) == NULL)
                goto failed;
        v7i = (struct sysv_inode *)(bh2->b_data + 64);
        if ((fs16_to_cpu(sb,v7i->i_mode) & ~0777) != S_IFDIR ||
index 982a7f808620b277545a0d6f076dae64b111a78c..8dc09bf9600800858f694032366ce5f32382bcf1 100644 (file)
@@ -98,7 +98,7 @@ static int read_block_bitmap(struct super_block * sb,
        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
 
-       bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block), sb->s_blocksize);
+       bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
        if (!bh)
        {
                retval = -EIO;
@@ -463,7 +463,7 @@ static void udf_table_free_blocks(struct super_block * sb,
        elen = 0;
        obloc = nbloc = UDF_I_LOCATION(table);
 
-       obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0), sb->s_blocksize);
+       obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0));
        atomic_inc(&nbh->b_count);
 
        while (count && (etype =
@@ -571,8 +571,7 @@ static void udf_table_free_blocks(struct super_block * sb,
                        elen -= sb->s_blocksize;
 
                        if (!(nbh = udf_tread(sb,
-                               udf_get_lb_pblock(sb, nbloc, 0),
-                               sb->s_blocksize)))
+                               udf_get_lb_pblock(sb, nbloc, 0))))
                        {
                                udf_release_data(obh);
                                goto error_return;
@@ -689,7 +688,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
        extoffset = sizeof(struct UnallocatedSpaceEntry);
        bloc = UDF_I_LOCATION(table);
 
-       bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0), sb->s_blocksize);
+       bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
        eloc.logicalBlockNum = 0xFFFFFFFF;
 
        while (first_block != eloc.logicalBlockNum && (etype =
@@ -766,7 +765,7 @@ static int udf_table_new_block(struct super_block * sb,
        extoffset = sizeof(struct UnallocatedSpaceEntry);
        bloc = UDF_I_LOCATION(table);
 
-       goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0), sb->s_blocksize);
+       goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
        atomic_inc(&goal_bh->b_count);
 
        while (spread && (etype =
index 7a13d861ccfdd7c77c0f73b0b366cb86eb9fe738..f1dd42b3c63af92a0411372283c7df66eacb99aa 100644 (file)
@@ -146,7 +146,7 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
                return -ENOENT;
        }
 
-       if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+       if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
        {
                udf_release_data(bh);
                return -EIO;
@@ -160,7 +160,7 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
                for (num=0; i>0; i--)
                {
                        block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
-                       tmp = udf_tgetblk(dir->i_sb, block, dir->i_sb->s_blocksize);
+                       tmp = udf_tgetblk(dir->i_sb, block);
                        if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
                                bha[num++] = tmp;
                        else
index 33f5cf0b09c2684e892e815c1cad224b9601ad57..97ebc7e6d9600765dec0c280d48fe81086c85742 100644 (file)
@@ -60,7 +60,7 @@ Uint8 * udf_filead_read(struct inode *dir, Uint8 *tmpad, Uint8 ad_size,
                block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
                if (!block)
                        return NULL;
-               if (!(*bh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+               if (!(*bh = udf_tread(dir->i_sb, block)))
                        return NULL;
        }
        else if (*offset > dir->i_sb->s_blocksize)
@@ -74,7 +74,7 @@ Uint8 * udf_filead_read(struct inode *dir, Uint8 *tmpad, Uint8 ad_size,
                block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
                if (!block)
                        return NULL;
-               if (!((*bh) = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+               if (!((*bh) = udf_tread(dir->i_sb, block)))
                        return NULL;
 
                memcpy((Uint8 *)ad + remainder, (*bh)->b_data, ad_size - remainder);
@@ -117,7 +117,7 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                        *extoffset = lextoffset;
 
                udf_release_data(fibh->sbh);
-               if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+               if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
                        return NULL;
                fibh->soffset = fibh->eoffset = 0;
 
@@ -129,7 +129,7 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                        for (num=0; i>0; i--)
                        {
                                block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset+i);
-                               tmp = udf_tgetblk(dir->i_sb, block, dir->i_sb->s_blocksize);
+                               tmp = udf_tgetblk(dir->i_sb, block);
                                if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
                                        bha[num++] = tmp;
                                else
@@ -183,7 +183,7 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                fibh->soffset -= dir->i_sb->s_blocksize;
                fibh->eoffset -= dir->i_sb->s_blocksize;
 
-               if (!(fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+               if (!(fibh->ebh = udf_tread(dir->i_sb, block)))
                        return NULL;
 
                if (sizeof(struct FileIdentDesc) > - fibh->soffset)
index 33ff393177dc8309408373d7cc3276d01cd3eab2..72ce8252793f8094364d46e7ad27757726d6e7b2 100644 (file)
@@ -57,7 +57,7 @@ static int udf_adinicb_readpage(struct file *file, struct page * page)
        kaddr = kmap(page);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
        block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-       bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+       bh = sb_bread(inode->i_sb, block);
        memcpy(kaddr, bh->b_data + udf_ext0_offset(inode), inode->i_size);
        brelse(bh);
        flush_dcache_page(page);
@@ -80,7 +80,7 @@ static int udf_adinicb_writepage(struct page *page)
 
        kaddr = kmap(page);
        block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-       bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+       bh = sb_bread(inode->i_sb, block);
        memcpy(bh->b_data + udf_ext0_offset(inode), kaddr, inode->i_size);
        mark_buffer_dirty(bh);
        brelse(bh);
@@ -105,7 +105,7 @@ static int udf_adinicb_commit_write(struct file *file, struct page *page, unsign
        char *kaddr = page_address(page);
 
        block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-       bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+       bh = sb_bread(inode->i_sb, block);
        memcpy(bh->b_data + udf_file_entry_alloc_offset(inode) + offset,
                kaddr + offset, to-offset);
        mark_buffer_dirty(bh);
@@ -246,8 +246,7 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 
        /* ok, we need to read the inode */
        bh = udf_tread(inode->i_sb,
-               udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
-               inode->i_sb->s_blocksize);
+               udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
 
        if (!bh)
        {
index 48431cec2bee8f3d9d5924af85cd36a7d8a59cdf..7b95bf925ef8f0dd07a5b866edacf302349bf91d 100644 (file)
@@ -184,7 +184,7 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
        }
 
        block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-       bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+       bh = udf_tread(inode->i_sb, block);
        if (!bh)
                return;
        page = grab_cache_page(inode->i_mapping, 0);
@@ -251,10 +251,10 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
                UDF_I_LOCATION(inode).partitionReferenceNum, 0);
        if (!newblock)
                return NULL;
-       sbh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+       sbh = udf_tread(inode->i_sb, inode->i_ino);
        if (!sbh)
                return NULL;
-       dbh = udf_tgetblk(inode->i_sb, newblock, inode->i_sb->s_blocksize);
+       dbh = udf_tgetblk(inode->i_sb, newblock);
        if (!dbh)
                return NULL;
        lock_buffer(dbh);
@@ -382,7 +382,7 @@ struct buffer_head * udf_getblk(struct inode * inode, long block,
        if (!*err && buffer_mapped(&dummy))
        {
                struct buffer_head *bh;
-               bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
+               bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy))
                {
                        lock_buffer(bh);
@@ -886,8 +886,7 @@ void udf_truncate(struct inode * inode)
                                udf_file_entry_alloc_offset(inode);
 
                        if ((bh = udf_tread(inode->i_sb,
-                               udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
-                               inode->i_sb->s_blocksize)))
+                               udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
                        {
                                memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
                                mark_buffer_dirty(bh);
@@ -1322,8 +1321,7 @@ udf_update_inode(struct inode *inode, int do_sync)
        int err = 0;
 
        bh = udf_tread(inode->i_sb,
-               udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
-               inode->i_sb->s_blocksize);
+               udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
 
        if (!bh)
        {
@@ -1624,8 +1622,7 @@ Sint8 udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
        if (!(*bh))
        {
                if (!(*bh = udf_tread(inode->i_sb,
-                       udf_get_lb_pblock(inode->i_sb, *bloc, 0),
-                       inode->i_sb->s_blocksize)))
+                       udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
                {
                        udf_debug("reading block %d failed!\n",
                                udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1653,7 +1650,7 @@ Sint8 udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
                        return -1;
                }
                if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
-                       *bloc, 0), inode->i_sb->s_blocksize)))
+                       *bloc, 0))))
                {
                        return -1;
                }
@@ -1759,8 +1756,7 @@ Sint8 udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
        if (!(bh))
        {
                if (!(bh = udf_tread(inode->i_sb,
-                       udf_get_lb_pblock(inode->i_sb, bloc, 0),
-                       inode->i_sb->s_blocksize)))
+                       udf_get_lb_pblock(inode->i_sb, bloc, 0))))
                {
                        udf_debug("reading block %d failed!\n",
                                udf_get_lb_pblock(inode->i_sb, bloc, 0));
@@ -1828,8 +1824,7 @@ Sint8 udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
        if (!(*bh))
        {
                if (!(*bh = udf_tread(inode->i_sb,
-                       udf_get_lb_pblock(inode->i_sb, *bloc, 0),
-                       inode->i_sb->s_blocksize)))
+                       udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
                {
                        udf_debug("reading block %d failed!\n",
                                udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1951,8 +1946,7 @@ Sint8 udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
        if (!(*bh))
        {
                if (!(*bh = udf_tread(inode->i_sb,
-                       udf_get_lb_pblock(inode->i_sb, *bloc, 0),
-                       inode->i_sb->s_blocksize)))
+                       udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
                {
                        udf_debug("reading block %d failed!\n",
                                udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -2033,8 +2027,7 @@ Sint8 udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
        if (!bh)
        {
                if (!(bh = udf_tread(inode->i_sb,
-                       udf_get_lb_pblock(inode->i_sb, bloc, 0),
-                       inode->i_sb->s_blocksize)))
+                       udf_get_lb_pblock(inode->i_sb, bloc, 0))))
                {
                        udf_debug("reading block %d failed!\n",
                                udf_get_lb_pblock(inode->i_sb, bloc, 0));
@@ -2068,8 +2061,7 @@ Sint8 udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
        if (!(nbh))
        {
                if (!(nbh = udf_tread(inode->i_sb,
-                       udf_get_lb_pblock(inode->i_sb, nbloc, 0),
-                       inode->i_sb->s_blocksize)))
+                       udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
                {
                        udf_debug("reading block %d failed!\n",
                                udf_get_lb_pblock(inode->i_sb, nbloc, 0));
index 4b9cb0ef1bd5ed854e0731300fb970cb920c3e2b..3cbebf75fac7eea605153f9fd19376ff9f68c3ee 100644 (file)
@@ -67,21 +67,21 @@ udf64_high32(Uint64 indat)
 #if defined(__linux__) && defined(__KERNEL__)
 
 extern struct buffer_head *
-udf_tgetblk(struct super_block *sb, int block, int size)
+udf_tgetblk(struct super_block *sb, int block)
 {
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
-               return getblk(sb->s_dev, udf_fixed_to_variable(block), size);
+               return sb_getblk(sb, udf_fixed_to_variable(block));
        else
-               return getblk(sb->s_dev, block, size);
+               return sb_getblk(sb, block);
 }
 
 extern struct buffer_head *
-udf_tread(struct super_block *sb, int block, int size)
+udf_tread(struct super_block *sb, int block)
 {
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
-               return bread(sb->s_dev, udf_fixed_to_variable(block), size);
+               return sb_bread(sb, udf_fixed_to_variable(block));
        else
-               return bread(sb->s_dev, block, size);
+               return sb_bread(sb, block);
 }
 
 extern struct GenericAttrFormat *
@@ -92,7 +92,7 @@ udf_add_extendedattr(struct inode * inode, Uint32 size, Uint32 type,
        long_ad eaicb;
        int offset;
 
-       *bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+       *bh = udf_tread(inode->i_sb, inode->i_ino);
 
        if (UDF_I_EXTENDED_FE(inode) == 0)
        {
@@ -208,7 +208,7 @@ udf_get_extendedattr(struct inode * inode, Uint32 type, Uint8 subtype,
        long_ad eaicb;
        Uint32 offset;
 
-       *bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+       *bh = udf_tread(inode->i_sb, inode->i_ino);
 
        if (UDF_I_EXTENDED_FE(inode) == 0)
        {
@@ -273,7 +273,7 @@ udf_read_untagged(struct super_block *sb, Uint32 block, Uint32 offset)
        struct buffer_head *bh = NULL;
 
        /* Read the block */
-       bh = udf_tread(sb, block+offset, sb->s_blocksize);
+       bh = udf_tread(sb, block+offset);
        if (!bh)
        {
                printk(KERN_ERR "udf: udf_read_untagged(%p,%d,%d) failed\n",
@@ -305,7 +305,7 @@ udf_read_tagged(struct super_block *sb, Uint32 block, Uint32 location, Uint16 *i
        if (block == 0xFFFFFFFF)
                return NULL;
 
-       bh = udf_tread(sb, block, sb->s_blocksize);
+       bh = udf_tread(sb, block);
        if (!bh)
        {
                udf_debug("block=%d, location=%d: read failed\n", block, location);
index 7b6f0a6745781b446416455a41274fa7b80e0c93..b36093c816898393faa158f214577e87cfadc93f 100644 (file)
@@ -183,7 +183,7 @@ udf_find_entry(struct inode *dir, struct dentry *dentry,
                return NULL;
        }
 
-       if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+       if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
        {
                udf_release_data(bh);
                return NULL;
@@ -404,7 +404,7 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
                else
                        offset = 0;
 
-               if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+               if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
                {
                        udf_release_data(bh);
                        *err = -EIO;
@@ -488,7 +488,7 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
                block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
                if (UDF_I_ALLOCTYPE(dir) == ICB_FLAG_AD_IN_ICB)
                {
-                       fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize);
+                       fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
                        fibh->soffset = fibh->eoffset = udf_file_entry_alloc_offset(dir);
                }
                else
@@ -803,7 +803,7 @@ static int empty_dir(struct inode *dir)
                return 0;
        }
 
-       if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+       if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
                return 0;
 
        while ( (f_pos < size) )
@@ -964,7 +964,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
 
                block = udf_get_pblock(inode->i_sb, block,
                        UDF_I_LOCATION(inode).partitionReferenceNum, 0);
-               bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+               bh = udf_tread(inode->i_sb, block);
                lock_buffer(bh);
                memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
                mark_buffer_uptodate(bh, 1);
@@ -974,7 +974,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
        else
        {
                block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-               bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+               bh = udf_tread(inode->i_sb, block);
        }
        ea = bh->b_data + udf_ext0_offset(inode);
 
index 933f2db7606359135c2a97b5d964fbe4c773054e..39382845c64fa748993088b10258d26125605067 100644 (file)
@@ -76,7 +76,7 @@ Uint32 udf_get_pblock_virt15(struct super_block *sb, Uint32 block, Uint16 partit
 
        loc = udf_block_map(UDF_SB_VAT(sb), newblock);
 
-       if (!(bh = bread(sb->s_dev, loc, sb->s_blocksize)))
+       if (!(bh = sb_bread(sb, loc)))
        {
                udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
                        sb, block, partition, loc, index);
index 034064dfdf202c8bfd783dae006773569e68fb19..7afbe3af2dc3cdd70f1f9f792cf0dfb5656f7f8c 100644 (file)
@@ -412,7 +412,7 @@ udf_vrs(struct super_block *sb, int silent)
        for (;!nsr02 && !nsr03; sector += sectorsize)
        {
                /* Read a block */
-               bh = udf_tread(sb, sector >> sb->s_blocksize_bits, sb->s_blocksize);
+               bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
                if (!bh)
                        break;
 
@@ -525,7 +525,7 @@ udf_find_anchor(struct super_block *sb)
 
                for (i=0; (!lastblock && i<sizeof(last)/sizeof(int)); i++)
                {
-                       if (last[i] < 0 || !(bh = bread(sb->s_dev, last[i], sb->s_blocksize)))
+                       if (last[i] < 0 || !(bh = sb_bread(sb, last[i])))
                        {
                                ident = location = 0;
                        }
@@ -560,7 +560,7 @@ udf_find_anchor(struct super_block *sb)
                        }
                        else
                        {
-                               if (last[i] < 256 || !(bh = bread(sb->s_dev, last[i] - 256, sb->s_blocksize)))
+                               if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256)))
                                {
                                        ident = location = 0;
                                }
@@ -579,8 +579,7 @@ udf_find_anchor(struct super_block *sb)
                                }
                                else
                                {
-                                       if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = bread(sb->s_dev, last[i] - 312 - UDF_SB_SESSION(sb),
-                                               sb->s_blocksize)))
+                                       if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb))))
                                        {
                                                ident = location = 0;
                                        }
@@ -606,7 +605,7 @@ udf_find_anchor(struct super_block *sb)
        if (!lastblock)
        {
                /* We havn't found the lastblock. check 312 */
-               if ((bh = bread(sb->s_dev, 312 + UDF_SB_SESSION(sb), sb->s_blocksize)))
+               if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb))))
                {
                        ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
                        location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
@@ -1258,7 +1257,7 @@ udf_load_partition(struct super_block *sb, lb_addr *fileset)
                                        Uint32 pos;
 
                                        pos = udf_block_map(UDF_SB_VAT(sb), 0);
-                                       bh = bread(sb->s_dev, pos, sb->s_blocksize);
+                                       bh = sb_bread(sb, pos);
                                        UDF_SB_TYPEVIRT(sb,i).s_start_offset =
                                                le16_to_cpu(((struct VirtualAllocationTable20 *)bh->b_data + udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
                                                        udf_ext0_offset(UDF_SB_VAT(sb));
@@ -1728,7 +1727,7 @@ udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
                {
                        udf_release_data(bh);
                        newblock = udf_get_lb_pblock(sb, loc, ++block);
-                       bh = udf_tread(sb, newblock, sb->s_blocksize);
+                       bh = udf_tread(sb, newblock);
                        if (!bh)
                        {
                                udf_debug("read failed\n");
index 543e9b45ecfad858989382cd118fe73183690c91..3254e530faf96defb3a215cc29029722f10f2154 100644 (file)
@@ -88,7 +88,7 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        lock_kernel();
        if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
        {
-               bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+               bh = udf_tread(inode->i_sb, inode->i_ino);
 
                if (!bh)
                        goto out;
@@ -97,8 +97,7 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        }
        else
        {
-               bh = bread(inode->i_dev, udf_block_map(inode, 0),
-                               inode->i_sb->s_blocksize);
+               bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
 
                if (!bh)
                        goto out;
index 97dabb0ba6734d756cbe4e76490c138b1b338cf4..56d80d31ce079ed32d10276f8cb60c8b9e2dc937 100644 (file)
@@ -139,8 +139,8 @@ extern void udf_discard_prealloc(struct inode *);
 
 /* misc.c */
 extern int udf_read_tagged_data(char *, int size, int fd, int block, int partref);
-extern struct buffer_head *udf_tgetblk(struct super_block *, int, int);
-extern struct buffer_head *udf_tread(struct super_block *, int, int);
+extern struct buffer_head *udf_tgetblk(struct super_block *, int);
+extern struct buffer_head *udf_tread(struct super_block *, int);
 extern struct GenericAttrFormat *udf_add_extendedattr(struct inode *, Uint32, Uint32, Uint8, struct buffer_head **);
 extern struct GenericAttrFormat *udf_get_extendedattr(struct inode *, Uint32, Uint8, struct buffer_head **);
 extern struct buffer_head *udf_read_tagged(struct super_block *, Uint32, Uint32, Uint16 *);
index 38083eab40fc1c5f4f2ee1e84108b4a0c00a86cc..31c1bdd0ed466a30eb3649cd2908d0168ca9a3f7 100644 (file)
@@ -223,7 +223,7 @@ failed:
 
 #define NULLIFY_FRAGMENTS \
        for (i = oldcount; i < newcount; i++) { \
-               bh = getblk (sb->s_dev, result + i, sb->s_blocksize); \
+               bh = sb_getblk(sb, result + i); \
                memset (bh->b_data, 0, sb->s_blocksize); \
                mark_buffer_uptodate(bh, 1); \
                mark_buffer_dirty (bh); \
@@ -357,7 +357,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                for (i = 0; i < oldcount; i++) {
-                       bh = bread (sb->s_dev, tmp + i, sb->s_blocksize);
+                       bh = sb_bread(sb, tmp + i);
                        if(bh)
                        {
                                mark_buffer_clean (bh);
index 16aa991954660483babcf2779fc10bf2b11ba1ec..97391b4d66c02b6cb152a58a3e9f52b339810892 100644 (file)
@@ -54,7 +54,7 @@ static void ufs_read_cylinder (struct super_block * sb,
         */
        UCPI_UBH->bh[0] = sb->u.ufs_sb.s_ucg[cgno];
        for (i = 1; i < UCPI_UBH->count; i++)
-               if (!(UCPI_UBH->bh[i] = bread (sb->s_dev, UCPI_UBH->fragment + i, sb->s_blocksize)))
+               if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
                        goto failed;
        sb->u.ufs_sb.s_cgno[bitmap_nr] = cgno;
                        
index f333e5a2f0ade9c73d88877c6721f486499ff7d8..3dca14b360e940f01a532a7de61ae49d8f7530af 100644 (file)
@@ -74,7 +74,7 @@ ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
        while (!error && !stored && filp->f_pos < inode->i_size) {
                lblk = (filp->f_pos) >> sb->s_blocksize_bits;
                blk = ufs_frag_map(inode, lblk);
-               if (!blk || !(bh = bread (sb->s_dev, blk, sb->s_blocksize))) {
+               if (!blk || !(bh = sb_bread(sb, blk))) {
                        /* XXX - error - skip to the next block */
                        printk("ufs_readdir: "
                               "dir inode %lu has a hole at offset %lu\n",
index cff561ab9b5fb88512bfee081fc1fbd3f40ea774..5c3bc8f2314648b160bba18f5c1f40ce15966dcf 100644 (file)
@@ -106,8 +106,7 @@ int ufs_frag_map(struct inode *inode, int frag)
                struct buffer_head *bh;
                int n = *p++;
 
-               bh = bread(sb->s_dev, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift),
-                               sb->s_blocksize);
+               bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
                if (!bh)
                        goto out;
                block = ((u32*) bh->b_data)[n & mask];
@@ -147,8 +146,7 @@ repeat:
        lastfrag = inode->u.ufs_i.i_lastfrag;
        if (tmp && fragment < lastfrag) {
                if (metadata) {
-                       result = getblk (sb->s_dev, uspi->s_sbbase + tmp + blockoff,
-                                        sb->s_blocksize);
+                       result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == fs32_to_cpu(sb, *p)) {
                                UFSD(("EXIT, result %u\n", tmp + blockoff))
                                return result;
@@ -216,7 +214,7 @@ repeat:
         * now. -DaveM
         */
        if (metadata) {
-               result = getblk (inode->i_dev, tmp + blockoff, sb->s_blocksize);
+               result = sb_getblk(inode->i_sb, tmp + blockoff);
        } else {
                *phys = tmp;
                result = NULL;
@@ -264,8 +262,7 @@ repeat:
        tmp = fs32_to_cpu(sb, *p);
        if (tmp) {
                if (metadata) {
-                       result = getblk (bh->b_dev, uspi->s_sbbase + tmp + blockoff,
-                                        sb->s_blocksize);
+                       result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == fs32_to_cpu(sb, *p))
                                goto out;
                        brelse (result);
@@ -292,7 +289,7 @@ repeat:
         * now. -DaveM
         */
        if (metadata) {
-               result = getblk (bh->b_dev, tmp + blockoff, sb->s_blocksize);
+               result = sb_getblk(sb, tmp + blockoff);
        } else {
                *phys = tmp;
                *new = 1;
@@ -425,7 +422,7 @@ struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
        *err = error;
        if (!error && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
-               bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
+               bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        mark_buffer_uptodate(bh, 1);
@@ -500,7 +497,7 @@ void ufs_read_inode (struct inode * inode)
                return;
        }
        
-       bh = bread (sb->s_dev, uspi->s_sbbase + ufs_inotofsba(inode->i_ino), sb->s_blocksize);
+       bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
                return;
@@ -591,7 +588,7 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
                return -1;
        }
 
-       bh = bread (sb->s_dev, ufs_inotofsba(inode->i_ino), sb->s_blocksize);
+       bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
                return -1;
index 8cdb4c852fcc335812f92c70d56ef6ea8f0d17ca..9421f5960524333cba137d3a16dc2d935add7061 100644 (file)
@@ -339,7 +339,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) {
                size = uspi->s_bsize;
                if (i + uspi->s_fpb > blks)
                        size = (blks - i) * uspi->s_fsize;
-               ubh = ubh_bread(sb->s_dev, uspi->s_csaddr + i, size);
+               ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
                if (!ubh)
                        goto failed;
                ubh_ubhcpymem (space, ubh, size);
@@ -363,7 +363,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) {
        }
        for (i = 0; i < uspi->s_ncg; i++) {
                UFSD(("read cg %u\n", i))
-               if (!(sb->u.ufs_sb.s_ucg[i] = bread (sb->s_dev, ufs_cgcmin(i), sb->s_blocksize)))
+               if (!(sb->u.ufs_sb.s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
                        goto failed;
                if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data))
                        goto failed;
@@ -414,7 +414,7 @@ void ufs_put_cylinder_structures (struct super_block * sb) {
                size = uspi->s_bsize;
                if (i + uspi->s_fpb > blks)
                        size = (blks - i) * uspi->s_fsize;
-               ubh = ubh_bread (sb->s_dev, uspi->s_csaddr + i, size);
+               ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
                ubh_memcpyubh (ubh, space, size);
                space += size;
                ubh_mark_buffer_uptodate (ubh, 1);
@@ -597,11 +597,12 @@ struct super_block * ufs_read_super (struct super_block * sb, void * data,
        
 again: 
        set_blocksize (sb->s_dev, block_size);
+       sb->s_blocksize = block_size;
 
        /*
         * read ufs super block from device
         */
-       ubh = ubh_bread_uspi (uspi, sb->s_dev, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
+       ubh = ubh_bread_uspi (uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
        if (!ubh) 
                goto failed;
        
index e90fa8f7b8ac192c215cb07b8299489730086c38..fc4cb9c386c7446829648addcf58e791d666edd5 100644 (file)
@@ -114,7 +114,7 @@ static int ufs_trunc_direct (struct inode * inode)
        frag1 = ufs_fragnum (frag1);
        frag2 = ufs_fragnum (frag2);
        for (j = frag1; j < frag2; j++) {
-               bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+               bh = sb_get_hash_table (sb, tmp + j);
                if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
                        retry = 1;
                        brelse (bh);
@@ -137,7 +137,7 @@ next1:
                if (!tmp)
                        continue;
                for (j = 0; j < uspi->s_fpb; j++) {
-                       bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+                       bh = sb_get_hash_table(sb, tmp + j);
                        if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
                                retry = 1;
                                brelse (bh);
@@ -176,7 +176,7 @@ next2:;
                ufs_panic(sb, "ufs_truncate_direct", "internal error");
        frag4 = ufs_fragnum (frag4);
        for (j = 0; j < frag4; j++) {
-               bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+               bh = sb_get_hash_table (sb, tmp + j);
                if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
                        retry = 1;
                        brelse (bh);
@@ -218,7 +218,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p)
        tmp = fs32_to_cpu(sb, *p);
        if (!tmp)
                return 0;
-       ind_ubh = ubh_bread (sb->s_dev, tmp, uspi->s_bsize);
+       ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
        if (tmp != fs32_to_cpu(sb, *p)) {
                ubh_brelse (ind_ubh);
                return 1;
@@ -235,7 +235,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p)
                if (!tmp)
                        continue;
                for (j = 0; j < uspi->s_fpb; j++) {
-                       bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+                       bh = sb_get_hash_table(sb, tmp + j);
                        if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
                                retry = 1;
                                brelse (bh);
@@ -312,7 +312,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p)
        tmp = fs32_to_cpu(sb, *p);
        if (!tmp)
                return 0;
-       dind_bh = ubh_bread (inode->i_dev, tmp, uspi->s_bsize);
+       dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
        if (tmp != fs32_to_cpu(sb, *p)) {
                ubh_brelse (dind_bh);
                return 1;
@@ -378,7 +378,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
        p = inode->u.ufs_i.i_u1.i_data + UFS_TIND_BLOCK;
        if (!(tmp = fs32_to_cpu(sb, *p)))
                return 0;
-       tind_bh = ubh_bread (sb->s_dev, tmp, uspi->s_bsize);
+       tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
        if (tmp != fs32_to_cpu(sb, *p)) {
                ubh_brelse (tind_bh);
                return 1;
index 06f2cb8d56f56d04f5fde0f0592fe4e2cf33f4fa..2d94ed5531d79d986b4b36ce8d4808d58a406cdf 100644 (file)
@@ -23,7 +23,7 @@
 
 
 struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
-       kdev_t dev, unsigned fragment, unsigned size)
+       struct super_block *sb, unsigned fragment, unsigned size)
 {
        struct ufs_buffer_head * ubh;
        unsigned i, j, count;
@@ -39,7 +39,7 @@ struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
        ubh->fragment = fragment;
        ubh->count = count;
        for (i = 0; i < count; i++)
-               if (!(ubh->bh[i] = bread (dev, fragment + i, uspi->s_fsize)))
+               if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
                        goto failed;
        for (; i < UFS_MAXFRAG; i++)
                ubh->bh[i] = NULL;
@@ -51,7 +51,7 @@ failed:
 }
 
 struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
-       kdev_t dev, unsigned fragment, unsigned size)
+       struct super_block *sb, unsigned fragment, unsigned size)
 {
        unsigned i, j, count;
        if (size & ~uspi->s_fmask)
@@ -62,7 +62,7 @@ struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
        USPI_UBH->fragment = fragment;
        USPI_UBH->count = count;
        for (i = 0; i < count; i++)
-               if (!(USPI_UBH->bh[i] = bread (dev, fragment + i, uspi->s_fsize)))
+               if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i)))
                        goto failed;
        for (; i < UFS_MAXFRAG; i++)
                USPI_UBH->bh[i] = NULL;
index 5ee0ecb3c1a106332535669df4eb4b92f2f8fb2e..2e5d4760282941d6dcc21c738b4669e558fc6546 100644 (file)
@@ -226,9 +226,9 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
 /*
  * These functions manipulate ufs buffers
  */
-#define ubh_bread(dev,fragment,size) _ubh_bread_(uspi,dev,fragment,size)  
-extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, kdev_t, unsigned, unsigned);
-extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, kdev_t, unsigned, unsigned);
+#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)  
+extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, unsigned, unsigned);
+extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, unsigned, unsigned);
 extern void ubh_brelse (struct ufs_buffer_head *);
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
index d8d68e8c296d3cb819f33b743b87be6e2ff872d8..a140326a59902c60f63194aa2559ad0f3b081766 100644 (file)
@@ -101,13 +101,6 @@ extern void iounmap(void *addr);
 #define bus_to_virt phys_to_virt
 #define page_to_bus page_to_phys
 
-/*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2)   \
-       ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
 /*
  * readX/writeX() are used to access memory mapped devices. On some
  * architectures the memory mapped IO stuff needs to be accessed
index 82badf63fdfd0c808f84a9beb2ee1fef5230f73a..b7c3f7a285e8ba56817ef4830afc1a50d3799b6f 100644 (file)
@@ -38,7 +38,6 @@ extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
 extern void (*mach_hd_setup)(char *, int *);
 extern long mach_max_dma_address;
 extern void (*mach_floppy_setup)(char *, int *);
-extern void (*mach_floppy_eject)(void);
 extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
 extern int mach_sysrq_key;
index fce01fcd21a945c22de89548fbe6fed6c2309841..b80f2f7ab8ce7fc5b3adccbb1ccfaf4d194e51d1 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: dma.h,v 1.19 2000/01/28 13:43:14 jj Exp $
+/* $Id: dma.h,v 1.21 2001/12/13 04:16:52 davem Exp $
  * include/asm-sparc64/dma.h
  *
  * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
@@ -218,10 +218,4 @@ extern int isa_dma_bridge_buggy;
 #define isa_dma_bridge_buggy   (0)
 #endif
 
-/* We support dynamic DMA remapping and adjacent SG entries
- * which have addresses modulo DMA_CHUNK_SIZE will be merged
- * by dma_prepare_sg().
- */
-#define DMA_CHUNK_SIZE 8192
-
 #endif /* !(_ASM_SPARC64_DMA_H) */
index 258428abdae72f7e3296d439ab63c64e25540672..8b8c056caf029783761ee79191a79b353708c28f 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.40 2001/11/10 09:24:56 davem Exp $ */
+/* $Id: io.h,v 1.46 2001/12/13 04:16:52 davem Exp $ */
 #ifndef __SPARC64_IO_H
 #define __SPARC64_IO_H
 
@@ -18,11 +18,10 @@ extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
 extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
 #define bus_to_virt bus_to_virt_not_defined_use_pci_map
 
+/* BIO layer definitions. */
 extern unsigned long phys_base;
 #define page_to_phys(page)     ((((page) - mem_map) << PAGE_SHIFT)+phys_base)
-
-#define BIOVEC_MERGEABLE(vec1, vec2)   \
-       ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (DMA_CHUNK_SIZE - 1)) == 0)
+#define BIO_VMERGE_BOUNDARY    8192
 
 /* Different PCI controllers we support have their PCI MEM space
  * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
@@ -258,6 +257,7 @@ static __inline__ void _raw_writeq(u64 q, unsigned long addr)
 #define __raw_readb(__addr)            (_raw_readb((unsigned long)(__addr)))
 #define __raw_readw(__addr)            (_raw_readw((unsigned long)(__addr)))
 #define __raw_readl(__addr)            (_raw_readl((unsigned long)(__addr)))
+#define __raw_readq(__addr)            (_raw_readq((unsigned long)(__addr)))
 #define __raw_writeb(__b, __addr)      (_raw_writeb((u8)(__b), (unsigned long)(__addr)))
 #define __raw_writew(__w, __addr)      (_raw_writew((u16)(__w), (unsigned long)(__addr)))
 #define __raw_writel(__l, __addr)      (_raw_writel((u32)(__l), (unsigned long)(__addr)))
@@ -415,7 +415,7 @@ out:
  */
 #define ioremap(__offset, __size)      ((void *)(__offset))
 #define ioremap_nocache(X,Y)           ioremap((X),(Y))
-#define iounmap(__addr)                        do { } while(0)
+#define iounmap(__addr)                        do { (void)(__addr); } while(0)
 
 /* Similarly for SBUS. */
 #define sbus_ioremap(__res, __offset, __size, __name) \
index 3a264a7e9063acd86e00370b4fbcd2bd78754096..39bbdbbe4f79599bfc8613927de601d89e2b097a 100644 (file)
@@ -31,7 +31,7 @@ affs_bread(struct super_block *sb, int block)
 {
        pr_debug(KERN_DEBUG "affs_bread: %d\n", block);
        if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
-               return bread(sb->s_dev, block, sb->s_blocksize);
+               return sb_bread(sb, block);
        return NULL;
 }
 static inline struct buffer_head *
@@ -39,7 +39,7 @@ affs_getblk(struct super_block *sb, int block)
 {
        pr_debug(KERN_DEBUG "affs_getblk: %d\n", block);
        if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
-               return getblk(sb->s_dev, block, sb->s_blocksize);
+               return sb_getblk(sb, block);
        return NULL;
 }
 static inline struct buffer_head *
@@ -48,10 +48,11 @@ affs_getzeroblk(struct super_block *sb, int block)
        struct buffer_head *bh;
        pr_debug(KERN_DEBUG "affs_getzeroblk: %d\n", block);
        if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
-               bh = getblk(sb->s_dev, block, sb->s_blocksize);
-               wait_on_buffer(bh);
+               bh = sb_getblk(sb, block);
+               lock_buffer(bh);
                memset(bh->b_data, 0 , sb->s_blocksize);
                mark_buffer_uptodate(bh, 1);
+               unlock_buffer(bh);
                return bh;
        }
        return NULL;
@@ -62,7 +63,7 @@ affs_getemptyblk(struct super_block *sb, int block)
        struct buffer_head *bh;
        pr_debug(KERN_DEBUG "affs_getemptyblk: %d\n", block);
        if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
-               bh = getblk(sb->s_dev, block, sb->s_blocksize);
+               bh = sb_getblk(sb, block);
                wait_on_buffer(bh);
                mark_buffer_uptodate(bh, 1);
                return bh;
index a7c0c25763819eb2fd51781b0bbb9a39c037826c..8bbacfeebeeb703172f907bbf49b4fa93215b98a 100644 (file)
 #ifndef __LINUX_BIO_H
 #define __LINUX_BIO_H
 
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY    0
+#endif
+
 #define BIO_DEBUG
 
 #ifdef BIO_DEBUG
@@ -61,7 +67,17 @@ struct bio {
 
        unsigned short          bi_vcnt;        /* how many bio_vec's */
        unsigned short          bi_idx;         /* current index into bvl_vec */
-       unsigned short          bi_hw_seg;      /* actual mapped segments */
+
+       /* Number of segments in this BIO after
+        * physical address coalescing is performed.
+        */
+       unsigned short          bi_phys_segments;
+
+       /* Number of segments after physical and DMA remapping
+        * hardware coalescing is performed.
+        */
+       unsigned short          bi_hw_segments;
+
        unsigned int            bi_size;        /* residual I/O count */
        unsigned int            bi_max;         /* max bvl_vecs we can hold,
                                                   used as index into pool */
@@ -128,10 +144,13 @@ struct bio {
 /*
  * merge helpers etc
  */
+
 #define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
 #define __BVEC_START(bio)      bio_iovec_idx((bio), 0)
-#define BIO_CONTIG(bio, nxt) \
-       BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt)))
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
+       ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)      \
+       ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -174,6 +193,7 @@ extern void bio_put(struct bio *);
 
 extern int bio_endio(struct bio *, int, int);
 struct request_queue;
+extern inline int bio_phys_segments(struct request_queue *, struct bio *);
 extern inline int bio_hw_segments(struct request_queue *, struct bio *);
 
 extern inline void __bio_clone(struct bio *, struct bio *);
index fad87a308171c3ea2350eb50fe0ab584a5ef1033..620b149ec617a075e7bcea899b3cff3acedbf337 100644 (file)
@@ -41,8 +41,19 @@ struct request {
                                         * touch them
                                         */
        unsigned long hard_nr_sectors;
-       unsigned short nr_segments;
+
+       /* Number of scatter-gather DMA addr+len pairs after
+        * physical address coalescing is performed.
+        */
+       unsigned short nr_phys_segments;
+
+       /* Number of scatter-gather addr+len pairs after
+        * physical and DMA remapping hardware coalescing is performed.
+        * This is the number of scatter-gather entries the driver
+        * will actually have to deal with after DMA mapping is done.
+        */
        unsigned short nr_hw_segments;
+
        unsigned int current_nr_sectors;
        unsigned int hard_cur_sectors;
        void *special;
@@ -146,6 +157,7 @@ struct request_queue
         * queue needs bounce pages for pages above this limit
         */
        unsigned long           bounce_pfn;
+       int                     bounce_gfp;
 
        /*
         * This is used to remove the plug when tq_disk runs.
@@ -166,7 +178,8 @@ struct request_queue
         * queue settings
         */
        unsigned short          max_sectors;
-       unsigned short          max_segments;
+       unsigned short          max_phys_segments;
+       unsigned short          max_hw_segments;
        unsigned short          hardsect_size;
        unsigned int            max_segment_size;
 
@@ -202,19 +215,22 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 #define BLK_BOUNCE_HIGH        (blk_max_low_pfn << PAGE_SHIFT)
 #define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
 
 #ifdef CONFIG_HIGHMEM
 
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
 
 extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
 {
-       create_bounce(q->bounce_pfn, bio);
+       create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
 }
 
 #else /* CONFIG_HIGHMEM */
 
 #define blk_queue_bounce(q, bio)       do { } while (0)
+#define init_emergency_isa_pool()      do { } while (0)
 
 #endif /* CONFIG_HIGHMEM */
 
@@ -257,7 +273,8 @@ extern struct request *blk_get_request(request_queue_t *, int, int);
 extern void blk_put_request(struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
 
 extern int block_ioctl(kdev_t, unsigned int, unsigned long);
@@ -270,7 +287,8 @@ extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
 extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
 extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
 extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
 extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
@@ -284,7 +302,8 @@ extern int * blksize_size[MAX_BLKDEV];
 
 extern int * max_readahead[MAX_BLKDEV];
 
-#define MAX_SEGMENTS 128
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
 #define MAX_SECTORS 255
 
 #define MAX_SEGMENT_SIZE       65536
diff --git a/include/linux/blkdev.h.orig b/include/linux/blkdev.h.orig
new file mode 100644 (file)
index 0000000..620b149
--- /dev/null
@@ -0,0 +1,371 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include <asm/scatterlist.h>
+
+struct request_queue;
+typedef struct request_queue request_queue_t;
+struct elevator_s;
+typedef struct elevator_s elevator_t;
+
+struct request_list {
+       unsigned int count;
+       struct list_head free;
+       wait_queue_head_t wait;
+};
+
+struct request {
+       struct list_head queuelist; /* looking for ->queue? you must _not_
+                                    * access it directly, use
+                                    * blkdev_dequeue_request! */
+       int elevator_sequence;
+
+       unsigned char cmd[16];
+
+       unsigned long flags;            /* see REQ_ bits below */
+
+       int rq_status;  /* should split this into a few status bits */
+       kdev_t rq_dev;
+       int errors;
+       sector_t sector;
+       unsigned long nr_sectors;
+       unsigned long hard_sector;      /* the hard_* are block layer
+                                        * internals, no driver should
+                                        * touch them
+                                        */
+       unsigned long hard_nr_sectors;
+
+       /* Number of scatter-gather DMA addr+len pairs after
+        * physical address coalescing is performed.
+        */
+       unsigned short nr_phys_segments;
+
+       /* Number of scatter-gather addr+len pairs after
+        * physical and DMA remapping hardware coalescing is performed.
+        * This is the number of scatter-gather entries the driver
+        * will actually have to deal with after DMA mapping is done.
+        */
+       unsigned short nr_hw_segments;
+
+       unsigned int current_nr_sectors;
+       unsigned int hard_cur_sectors;
+       void *special;
+       char *buffer;
+       struct completion *waiting;
+       struct bio *bio, *biotail;
+       request_queue_t *q;
+       struct request_list *rl;
+};
+
+/*
+ * first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+       __REQ_RW,       /* not set, read. set, write */
+       __REQ_RW_AHEAD, /* READA */
+       __REQ_BARRIER,  /* may not be passed */
+       __REQ_CMD,      /* is a regular fs rw request */
+       __REQ_NOMERGE,  /* don't touch this for merging */
+       __REQ_STARTED,  /* drive already may have started this one */
+       __REQ_DONTPREP, /* don't call prep for this one */
+       /*
+        * for IDE
+       */
+       __REQ_DRIVE_CMD,
+       __REQ_DRIVE_TASK,
+
+       __REQ_PC,       /* packet command (special) */
+       __REQ_BLOCK_PC, /* queued down pc from block layer */
+       __REQ_SENSE,    /* sense retrival */
+
+       __REQ_SPECIAL,  /* driver special command */
+
+       __REQ_NR_BITS,  /* stops here */
+};
+
+#define REQ_RW         (1 << __REQ_RW)
+#define REQ_RW_AHEAD   (1 << __REQ_RW_AHEAD)
+#define REQ_BARRIER    (1 << __REQ_BARRIER)
+#define REQ_CMD                (1 << __REQ_CMD)
+#define REQ_NOMERGE    (1 << __REQ_NOMERGE)
+#define REQ_STARTED    (1 << __REQ_STARTED)
+#define REQ_DONTPREP   (1 << __REQ_DONTPREP)
+#define REQ_DRIVE_CMD  (1 << __REQ_DRIVE_CMD)
+#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
+#define REQ_PC         (1 << __REQ_PC)
+#define REQ_SENSE      (1 << __REQ_SENSE)
+#define REQ_BLOCK_PC   (1 << __REQ_BLOCK_PC)
+#define REQ_SPECIAL    (1 << __REQ_SPECIAL)
+
+#include <linux/elevator.h>
+
+typedef int (merge_request_fn) (request_queue_t *, struct request *,
+                               struct bio *);
+typedef int (merge_requests_fn) (request_queue_t *, struct request *,
+                                struct request *);
+typedef void (request_fn_proc) (request_queue_t *q);
+typedef request_queue_t * (queue_proc) (kdev_t dev);
+typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
+typedef int (prep_rq_fn) (request_queue_t *, struct request *);
+typedef void (unplug_device_fn) (void *q);
+
+enum blk_queue_state {
+       Queue_down,
+       Queue_up,
+};
+
+/*
+ * Default nr free requests per queue, ll_rw_blk will scale it down
+ * according to available RAM at init time
+ */
+#define QUEUE_NR_REQUESTS      8192
+
+struct request_queue
+{
+       /*
+        * the queue request freelist, one for reads and one for writes
+        */
+       struct request_list     rq[2];
+
+       /*
+        * Together with queue_head for cacheline sharing
+        */
+       struct list_head        queue_head;
+       elevator_t              elevator;
+
+       request_fn_proc         *request_fn;
+       merge_request_fn        *back_merge_fn;
+       merge_request_fn        *front_merge_fn;
+       merge_requests_fn       *merge_requests_fn;
+       make_request_fn         *make_request_fn;
+       prep_rq_fn              *prep_rq_fn;
+
+       /*
+        * The queue owner gets to use this for whatever they like.
+        * ll_rw_blk doesn't touch it.
+        */
+       void                    *queuedata;
+
+       /*
+        * queue needs bounce pages for pages above this limit
+        */
+       unsigned long           bounce_pfn;
+       int                     bounce_gfp;
+
+       /*
+        * This is used to remove the plug when tq_disk runs.
+        */
+       struct tq_struct        plug_tq;
+
+       /*
+        * various queue flags, see QUEUE_* below
+        */
+       unsigned long           queue_flags;
+
+       /*
+        * protects queue structures from reentrancy
+        */
+       spinlock_t              *queue_lock;
+
+       /*
+        * queue settings
+        */
+       unsigned short          max_sectors;
+       unsigned short          max_phys_segments;
+       unsigned short          max_hw_segments;
+       unsigned short          hardsect_size;
+       unsigned int            max_segment_size;
+
+       unsigned long           seg_boundary_mask;
+
+       wait_queue_head_t       queue_wait;
+};
+
+#define RQ_INACTIVE            (-1)
+#define RQ_ACTIVE              1
+#define RQ_SCSI_BUSY           0xffff
+#define RQ_SCSI_DONE           0xfffe
+#define RQ_SCSI_DISCONNECTING  0xffe0
+
+#define QUEUE_FLAG_PLUGGED     0       /* queue is plugged */
+#define QUEUE_FLAG_NOSPLIT     1       /* can process bio over several goes */
+#define QUEUE_FLAG_CLUSTER     2       /* cluster several segments into 1 */
+
+#define blk_queue_plugged(q)   test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_mark_plugged(q)    set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_queue_empty(q)     elv_queue_empty(q)
+#define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)
+
+#define rq_data_dir(rq)                ((rq)->flags & 1)
+
+/*
+ * noop, requests are automagically marked as active/inactive by I/O
+ * scheduler -- see elv_next_request
+ */
+#define blk_queue_headactive(q, head_active)
+
+extern unsigned long blk_max_low_pfn, blk_max_pfn;
+
+#define BLK_BOUNCE_HIGH        (blk_max_low_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
+
+#ifdef CONFIG_HIGHMEM
+
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
+
+extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+{
+       create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define blk_queue_bounce(q, bio)       do { } while (0)
+#define init_emergency_isa_pool()      do { } while (0)
+
+#endif /* CONFIG_HIGHMEM */
+
+#define rq_for_each_bio(bio, rq)       \
+       if ((rq->bio))                  \
+               for (bio = (rq)->bio; bio; bio = bio->bi_next)
+
+struct blk_dev_struct {
+       /*
+        * queue_proc has to be atomic
+        */
+       request_queue_t         request_queue;
+       queue_proc              *queue;
+       void                    *data;
+};
+
+struct sec_size {
+       unsigned block_size;
+       unsigned block_size_bits;
+};
+
+/*
+ * Used to indicate the default queue for drivers that don't bother
+ * to implement multiple queues.  We have this access macro here
+ * so as to eliminate the need for each and every block device
+ * driver to know about the internal structure of blk_dev[].
+ */
+#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern void grok_partitions(kdev_t dev, long size);
+extern int wipe_partitions(kdev_t dev);
+extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
+extern void generic_make_request(struct bio *bio);
+extern inline request_queue_t *blk_get_queue(kdev_t dev);
+extern void blkdev_release_request(struct request *);
+extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern struct request *blk_get_request(request_queue_t *, int, int);
+extern void blk_put_request(struct request *);
+extern void blk_plug_device(request_queue_t *);
+extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
+
+extern int block_ioctl(kdev_t, unsigned int, unsigned long);
+
+/*
+ * Access functions for manipulating queue properties
+ */
+extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(request_queue_t *);
+extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void blk_queue_bounce_limit(request_queue_t *, u64);
+extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
+extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
+extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern void blk_dump_rq_flags(struct request *, char *);
+extern void generic_unplug_device(void *);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * max_readahead[MAX_BLKDEV];
+
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
+#define MAX_SECTORS 255
+
+#define MAX_SEGMENT_SIZE       65536
+
+/* read-ahead in pages.. */
+#define MAX_READAHEAD  31
+#define MIN_READAHEAD  3
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
+#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev)
+
+extern void drive_stat_acct(struct request *, int, int);
+
+extern inline void blk_clear(int major)
+{
+       blk_size[major] = NULL;
+#if 0
+       blk_size_in_bytes[major] = NULL;
+#endif
+       blksize_size[major] = NULL;
+       max_readahead[major] = NULL;
+       read_ahead[major] = 0;
+}
+
+extern inline int get_hardsect_size(kdev_t dev)
+{
+       request_queue_t *q = blk_get_queue(dev);
+       int retval = 512;
+
+       if (q && q->hardsect_size)
+               retval = q->hardsect_size;
+
+       return retval;
+}
+
+#define blk_finished_io(nsects)        do { } while (0)
+#define blk_started_io(nsects) do { } while (0)
+
+extern inline unsigned int blksize_bits(unsigned int size)
+{
+       unsigned int bits = 8;
+       do {
+               bits++;
+               size >>= 1;
+       } while (size > 256);
+       return bits;
+}
+
+extern inline unsigned int block_size(kdev_t dev)
+{
+       int retval = BLOCK_SIZE;
+       int major = MAJOR(dev);
+
+       if (blksize_size[major]) {
+               int minor = MINOR(dev);
+               if (blksize_size[major][minor])
+                       retval = blksize_size[major][minor];
+       }
+       return retval;
+}
+
+#endif
index c0ed2792ba8b389c8261992a3f844992218109d2..187785b83958c0a27d2fb63bf460669b971b6f33 100644 (file)
@@ -369,10 +369,4 @@ struct floppy_raw_cmd {
 #define FDEJECT _IO(2, 0x5a)
 /* eject the disk */
 
-
-#ifdef __KERNEL__
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-void floppy_eject(void);
-#endif
-
 #endif
index 7f52b46d619f85d0135315ecd8a7c098da2b3f6e..b1e59c161107bfc8e54829b5023a70a9895bfcb5 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/cache.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
-#include <linux/bio.h>
 
 #include <asm/atomic.h>
 #include <asm/bitops.h>
@@ -1363,6 +1362,7 @@ extern struct buffer_head * get_hash_table(kdev_t, sector_t, int);
 extern struct buffer_head * getblk(kdev_t, sector_t, int);
 extern void ll_rw_block(int, int, struct buffer_head * bh[]);
 extern int submit_bh(int, struct buffer_head *);
+struct bio;
 extern int submit_bio(int, struct bio *);
 extern int is_read_only(kdev_t);
 extern void __brelse(struct buffer_head *);
@@ -1379,6 +1379,18 @@ static inline void bforget(struct buffer_head *buf)
 }
 extern int set_blocksize(kdev_t, int);
 extern struct buffer_head * bread(kdev_t, int, int);
+static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
+{
+       return bread(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
+{
+       return getblk(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
+{
+       return get_hash_table(sb->s_dev, block, sb->s_blocksize);
+}
 extern void wakeup_bdflush(void);
 extern void put_unused_buffer_head(struct buffer_head * bh);
 extern struct buffer_head * get_unused_buffer_head(int async);
index 157c3b62fc347e141fe9542e0fe611d247540bda..7aa92d2c257a40cff5d1e997bad48235ba9a8694 100644 (file)
@@ -2,6 +2,7 @@
 #define _LINUX_HIGHMEM_H
 
 #include <linux/config.h>
+#include <linux/bio.h>
 #include <asm/pgalloc.h>
 
 #ifdef CONFIG_HIGHMEM
@@ -13,7 +14,7 @@ extern struct page *highmem_start_page;
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
 
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
 
 static inline char *bh_kmap(struct buffer_head *bh)
 {
index 82dde80812ae483af35968bf6e28366324ca8ce2..9cdfbaea416dd97fceab5165ffb18f370bf37178 100644 (file)
@@ -219,7 +219,7 @@ int get_joliet_filename(struct iso_directory_record *, unsigned char *, struct i
 int get_acorn_filename(struct iso_directory_record *, char *, struct inode *);
 
 extern struct dentry *isofs_lookup(struct inode *, struct dentry *);
-extern struct buffer_head *isofs_bread(struct inode *, unsigned int, unsigned int);
+extern struct buffer_head *isofs_bread(struct inode *, unsigned int);
 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
 
 extern struct inode_operations isofs_dir_inode_operations;
@@ -230,11 +230,11 @@ extern struct address_space_operations isofs_symlink_aops;
 #ifdef LEAK_CHECK
 #define free_s leak_check_free_s
 #define malloc leak_check_malloc
-#define bread leak_check_bread
+#define sb_bread leak_check_bread
 #define brelse leak_check_brelse
 extern void * leak_check_malloc(unsigned int size);
 extern void leak_check_free_s(void * obj, int size);
-extern struct buffer_head * leak_check_bread(int dev, int block, int size);
+extern struct buffer_head * leak_check_bread(struct super_block *sb, int block);
 extern void leak_check_brelse(struct buffer_head * bh);
 #endif /* LEAK_CHECK */
 
index dd9b7cb6efb5bc3597eb97fcbbd2f182cc7af2c7..55ba2f99d9a23cc6642a700206bce6b639afc1dc 100644 (file)
@@ -118,7 +118,7 @@ extern int qnx4_unlink(struct inode *dir, struct dentry *dentry);
 extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
 extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int);
 extern int qnx4_sync_inode(struct inode *inode);
-extern int qnx4_get_block(struct inode *inode, long iblock, struct buffer_head *bh, int create);
+extern int qnx4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create);
 
 #endif                         /* __KERNEL__ */
 
index e6a94292c2b41cc18c386c0e5e6df5d7cc6859cd..3aab59aec4fcbc4cac60567f85012caf22061ad9 100644 (file)
@@ -351,23 +351,20 @@ static int __init create_dev(char *name, kdev_t dev, char *devfs_name)
        return sys_symlink(path + n + 5, name);
 }
 
-#ifdef CONFIG_MAC_FLOPPY
-int swim3_fd_eject(int devnum);
-#endif
 static void __init change_floppy(char *fmt, ...)
 {
        extern void wait_for_keypress(void);
        char buf[80];
+       int fd;
        va_list args;
        va_start(args, fmt);
        vsprintf(buf, fmt, args);
        va_end(args);
-#ifdef CONFIG_BLK_DEV_FD
-       floppy_eject();
-#endif
-#ifdef CONFIG_MAC_FLOPPY
-       swim3_fd_eject(MINOR(ROOT_DEV));
-#endif
+       fd = open("/dev/root", O_RDWR, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, FDEJECT, 0);
+               close(fd);
+       }
        printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
        wait_for_keypress();
 }
index bd626a165483c74c8659390de2b89ca7db6ad23e..55a53c02be1761fa6dadb152a857e6532b6e22bf 100644 (file)
@@ -60,7 +60,7 @@ extern void set_device_ro(kdev_t dev,int flag);
 extern void *sys_call_table;
 
 extern struct timezone sys_tz;
-extern int request_dma(unsigned int dmanr, char * deviceID);
+extern int request_dma(unsigned int dmanr, const char * deviceID);
 extern void free_dma(unsigned int dmanr);
 extern spinlock_t dma_spin_lock;
 
index 44acecd851c7db17498cd2e0b388d5476090ba10..b6958912e43188d23fba7406e3ae2a527544057b 100644 (file)
@@ -649,8 +649,10 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
  *
- * POSIX specifies that kill(-1,sig) is unspecified, but what we have
- * is probably wrong.  Should make it like BSD or SYSV.
+ * POSIX (2001) specifies "If pid is -1, sig shall be sent to all processes
+ * (excluding an unspecified set of system processes) for which the process
+ * has permission to send that signal."
+ * So, probably the process should also signal itself.
  */
 
 static int kill_something_info(int sig, struct siginfo *info, int pid)
@@ -663,7 +665,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
 
                read_lock(&tasklist_lock);
                for_each_task(p) {
-                       if (p->pid > 1 && p != current) {
+                       if (p->pid > 1) {
                                int err = send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
index bd53edf5452e789aec1b2b6942dc5dbb6e0befeb..0ae33bcc1a3d8dc05c2d2279f9b98fa4428ffe21 100644 (file)
@@ -2847,7 +2847,7 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
        unsigned long   limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
        loff_t          pos;
        struct page     *page, *cached_page;
-       unsigned long   written;
+       ssize_t         written;
        long            status = 0;
        int             err;
        unsigned        bytes;
index efdc8b71bc8cada8f677af6eaa8b06e30a802c63..72fd4e8c1b2075b52b624e87e18c7d41e0c30262 100644 (file)
@@ -184,13 +184,14 @@ void kunmap_high(struct page *page)
                wake_up(&pkmap_map_wait);
 }
 
-#define POOL_SIZE 64
+#define POOL_SIZE      64
+#define ISA_POOL_SIZE  16
 
-static mempool_t *page_pool;
+static mempool_t *page_pool, *isa_page_pool;
 
-static void * page_pool_alloc(int gfp_mask, void *data)
+static void *page_pool_alloc(int gfp_mask, void *data)
 {
-       return alloc_page(gfp_mask & ~ __GFP_HIGHIO);
+       return alloc_page(gfp_mask);
 }
 
 static void page_pool_free(void *page, void *data)
@@ -215,6 +216,23 @@ static __init int init_emergency_pool(void)
        return 0;
 }
 
+/*
+ * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+ */
+int init_emergency_isa_pool(void)
+{
+       if (isa_page_pool)
+               return 0;
+
+       isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+       if (!isa_page_pool)
+               BUG();
+
+       printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
+       return 0;
+}
+
 __initcall(init_emergency_pool);
 
 /*
@@ -248,7 +266,7 @@ static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
        }
 }
 
-static inline int bounce_end_io (struct bio *bio, int nr_sectors)
+static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *pool)
 {
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
@@ -267,7 +285,7 @@ static inline int bounce_end_io (struct bio *bio, int nr_sectors)
                if (bvec->bv_page == org_vec->bv_page)
                        continue;
 
-               mempool_free(bvec->bv_page, page_pool); 
+               mempool_free(bvec->bv_page, pool);      
        }
 
 out_eio:
@@ -279,28 +297,53 @@ out_eio:
 
 static int bounce_end_io_write(struct bio *bio, int nr_sectors)
 {
-       return bounce_end_io(bio, nr_sectors);
+       return bounce_end_io(bio, nr_sectors, page_pool);
+}
+
+static int bounce_end_io_write_isa(struct bio *bio, int nr_sectors)
+{
+       return bounce_end_io(bio, nr_sectors, isa_page_pool);
 }
 
-static int bounce_end_io_read (struct bio *bio, int nr_sectors)
+static inline int __bounce_end_io_read(struct bio *bio, int nr_sectors,
+                                      mempool_t *pool)
 {
        struct bio *bio_orig = bio->bi_private;
 
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                copy_to_high_bio_irq(bio_orig, bio);
 
-       return bounce_end_io(bio, nr_sectors);
+       return bounce_end_io(bio, nr_sectors, pool);
+}
+
+static int bounce_end_io_read(struct bio *bio, int nr_sectors)
+{
+       return __bounce_end_io_read(bio, nr_sectors, page_pool);
 }
 
-void create_bounce(unsigned long pfn, struct bio **bio_orig)
+static int bounce_end_io_read_isa(struct bio *bio, int nr_sectors)
+{
+       return __bounce_end_io_read(bio, nr_sectors, isa_page_pool);
+}
+
+void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
 {
        struct page *page;
        struct bio *bio = NULL;
-       int i, rw = bio_data_dir(*bio_orig);
+       int i, rw = bio_data_dir(*bio_orig), bio_gfp;
        struct bio_vec *to, *from;
+       mempool_t *pool;
 
        BUG_ON((*bio_orig)->bi_idx);
 
+       if (!(gfp & GFP_DMA)) {
+               bio_gfp = GFP_NOHIGHIO;
+               pool = page_pool;
+       } else {
+               bio_gfp = GFP_NOIO;
+               pool = isa_page_pool;
+       }
+
        bio_for_each_segment(from, *bio_orig, i) {
                page = from->bv_page;
 
@@ -314,11 +357,11 @@ void create_bounce(unsigned long pfn, struct bio **bio_orig)
                 * irk, bounce it
                 */
                if (!bio)
-                       bio = bio_alloc(GFP_NOHIGHIO, (*bio_orig)->bi_vcnt);
+                       bio = bio_alloc(bio_gfp, (*bio_orig)->bi_vcnt);
 
                to = &bio->bi_io_vec[i];
 
-               to->bv_page = mempool_alloc(page_pool, GFP_NOHIGHIO);
+               to->bv_page = mempool_alloc(pool, gfp);
                to->bv_len = from->bv_len;
                to->bv_offset = from->bv_offset;
 
@@ -359,10 +402,17 @@ void create_bounce(unsigned long pfn, struct bio **bio_orig)
        bio->bi_idx = 0;
        bio->bi_size = (*bio_orig)->bi_size;
 
-       if (rw & WRITE)
-               bio->bi_end_io = bounce_end_io_write;
-       else
-               bio->bi_end_io = bounce_end_io_read;
+       if (pool == page_pool) {
+               if (rw & WRITE)
+                       bio->bi_end_io = bounce_end_io_write;
+               else
+                       bio->bi_end_io = bounce_end_io_read;
+       } else {
+               if (rw & WRITE)
+                       bio->bi_end_io = bounce_end_io_write_isa;
+               else
+                       bio->bi_end_io = bounce_end_io_read_isa;
+       }
 
        bio->bi_private = *bio_orig;
        *bio_orig = bio;
index 0c0bf99965ca11b449a9f714e3b5f7646e212873..ecf1acc80fea9341dec1203ae841138c0b9feb71 100644 (file)
@@ -176,7 +176,8 @@ void mempool_destroy(mempool_t *pool)
  *
  * this function only sleeps if the alloc_fn function sleeps or
  * returns NULL. Note that due to preallocation, this function
- * *never* fails.
+ * *never* fails when called from process contexts. (it might
+ * fail if called from an IRQ context.)
  */
 void * mempool_alloc(mempool_t *pool, int gfp_mask)
 {
@@ -185,7 +186,7 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask)
        struct list_head *tmp;
        int curr_nr;
        DECLARE_WAITQUEUE(wait, current);
-       int gfp_nowait = gfp_mask & ~__GFP_WAIT;
+       int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
 
 repeat_alloc:
        element = pool->alloc(gfp_nowait, pool->pool_data);
@@ -196,15 +197,11 @@ repeat_alloc:
         * If the pool is less than 50% full then try harder
         * to allocate an element:
         */
-       if (gfp_mask != gfp_nowait) {
-               if (pool->curr_nr <= pool->min_nr/2) {
-                       element = pool->alloc(gfp_mask, pool->pool_data);
-                       if (likely(element != NULL))
-                               return element;
-               }
-       } else
-               /* we must not sleep */
-               return NULL;
+       if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
+               element = pool->alloc(gfp_mask, pool->pool_data);
+               if (likely(element != NULL))
+                       return element;
+       }
 
        /*
         * Kick the VM at this point.
@@ -218,19 +215,25 @@ repeat_alloc:
                element = tmp;
                pool->curr_nr--;
                spin_unlock_irqrestore(&pool->lock, flags);
-
                return element;
        }
+       spin_unlock_irqrestore(&pool->lock, flags);
+
+       /* We must not sleep in the GFP_ATOMIC case */
+       if (gfp_mask == gfp_nowait)
+               return NULL;
+
+       run_task_queue(&tq_disk);
+
        add_wait_queue_exclusive(&pool->wait, &wait);
        set_task_state(current, TASK_UNINTERRUPTIBLE);
 
+       spin_lock_irqsave(&pool->lock, flags);
        curr_nr = pool->curr_nr;
        spin_unlock_irqrestore(&pool->lock, flags);
 
-       if (!curr_nr) {
-               run_task_queue(&tq_disk);
+       if (!curr_nr)
                schedule();
-       }
 
        current->state = TASK_RUNNING;
        remove_wait_queue(&pool->wait, &wait);