S: London SE16 1GD
S: United Kingdom
+N: Jan Harkes
+E: jaharkes@cs.cmu.edu
+W: http://www.coda.cs.cmu.edu/
+D: Coda file system
+S: Computer Science Department
+S: Carnegie Mellon University
+S: 5000 Forbes Avenue
+S: Pittsburgh, Pennsylvania 15213
+S: USA
+
N: Kai Harrekilde-Petersen
E: kai.harrekilde@get2net.dk
D: Original author of the ftape-HOWTO, i82078 fdc detection code.
M: Nils Faerber <nils@kernelconcepts.de>
S: Maintained
+CODA FILE SYSTEM
+P: Jan Harkes
+M: jaharkes@cs.cmu.edu
+M: coda@cs.cmu.edu
+L: codalist@coda.cs.cmu.edu
+W: http://www.coda.cs.cmu.edu/
+S: Maintained
+
COMPAQ FIBRE CHANNEL 64-bit/66MHz PCI non-intelligent HBA
P: Amy Vanzant-Hodge
M: Amy Vanzant-Hodge (fibrechannel@compaq.com)
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 1
-EXTRAVERSION =-pre5
+EXTRAVERSION =-pre6
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
MATCH(DMI_BIOS_DATE, "05/11/00"), NO_MATCH
} },
+ { swab_apm_power_in_minutes, "Sony VAIO", { /* Handle problems with APM on Sony Vaio PCG-Z600NE */
+ MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ MATCH(DMI_BIOS_VERSION, "WME01Z1"),
+ MATCH(DMI_BIOS_DATE, "08/11/00"), NO_MATCH
+ } },
+
{ swab_apm_power_in_minutes, "Sony VAIO", { /* Handle problems with APM on Sony Vaio PCG-Z505LS */
MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
MATCH(DMI_BIOS_VERSION, "R0203D0"),
O_TARGET := block.o
-export-objs := elevator.o ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o
+export-objs := elevator.o ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o block_ioctl.o
-obj-y := elevator.o ll_rw_blk.o blkpg.o genhd.o
+obj-y := elevator.o ll_rw_blk.o blkpg.o genhd.o block_ioctl.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
/*
* Common ioctl's for block devices
*/
-
+extern int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg);
int blk_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg)
{
request_queue_t *q;
if (!dev)
return -EINVAL;
+ intval = block_ioctl(dev, cmd, arg);
+ if (intval != -ENOTTY)
+ return intval;
+
switch (cmd) {
case BLKROSET:
if (!capable(CAP_SYS_ADMIN))
--- /dev/null
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/locks.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/blk.h>
+
+#include <linux/cdrom.h>
+
+/*
+ * blk_do_rq - issue a fully built request on @q and wait for completion.
+ *
+ * The request is flagged REQ_BARRIER so the elevator will not merge
+ * other requests into it or reorder around it, then it is queued and the
+ * device unplugged.  The caller sleeps until the driver completes it.
+ *
+ * Returns 0 on success, -EIO if the driver reported any errors.
+ */
+int blk_do_rq(request_queue_t *q, struct request *rq)
+{
+	DECLARE_COMPLETION(wait);
+	int err = 0;
+
+	rq->flags |= REQ_BARRIER;
+	rq->waiting = &wait;
+	elv_add_request(q, rq);
+	generic_unplug_device(q);
+	wait_for_completion(&wait);
+
+	/*
+	 * for now, never retry anything
+	 */
+	if (rq->errors)
+		err = -EIO;
+
+	return err;
+}
+
+/*
+ * block_ioctl - generic block-layer handling of ioctls that can be sent
+ * to the device as packet commands.
+ *
+ * Currently implements CDROMEJECT and CDROMCLOSETRAY by building a
+ * START/STOP UNIT packet command and running it synchronously through
+ * blk_do_rq().  Returns -ENXIO if @dev has no queue, -ENOTTY for any
+ * command not handled here so the caller can fall back to its own
+ * handling, otherwise the result of the command.
+ */
+int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg)
+{
+	request_queue_t *q;
+	struct request *rq;
+	int close = 0, err;
+
+	q = blk_get_queue(dev);
+	if (!q)
+		return -ENXIO;
+
+	switch (cmd) {
+	case CDROMCLOSETRAY:
+		close = 1;
+		/* fall through -- tray close shares the eject command */
+	case CDROMEJECT:
+		rq = blk_get_request(q, WRITE, __GFP_WAIT);
+		rq->flags = REQ_BLOCK_PC;
+		memset(rq->cmd, 0, sizeof(rq->cmd));
+		rq->cmd[0] = GPCMD_START_STOP_UNIT;
+		/* byte 4: 0x02 (LoEj) ejects, 0x03 (LoEj|Start) closes tray */
+		rq->cmd[4] = 0x02 + (close != 0);
+		err = blk_do_rq(q, rq);
+		blk_put_request(rq);
+		break;
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
c->Request.Type.Type = TYPE_CMD; // It is a command.
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction =
- (creq->cmd == READ) ? XFER_READ: XFER_WRITE;
+ (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
c->Request.Timeout = 0; // Don't time out
- c->Request.CDB[0] = (creq->cmd == READ) ? CCISS_READ : CCISS_WRITE;
+ c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
start_blk = creq->sector;
#ifdef CCISS_DEBUG
printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
blk_init_queue(q, do_cciss_request);
blk_queue_headactive(q, 0);
blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
- q->max_segments = MAXSGENTRIES;
+ blk_queue_max_segments(q, MAXSGENTRIES);
blk_queue_max_sectors(q, 512);
/* fill in the other Kernel structs */
blk_init_queue(q, do_ida_request);
blk_queue_headactive(q, 0);
blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
- q->max_segments = SG_MAX;
+ blk_queue_max_segments(q, SG_MAX);
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
read_ahead[MAJOR_NR+i] = READ_AHEAD;
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* Now do all the DMA Mappings */
- if (creq->cmd == READ)
+ if (rq_data_dir(creq) == READ)
dir = PCI_DMA_FROMDEVICE;
else
dir = PCI_DMA_TODEVICE;
DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
c->req.hdr.sg_cnt = seg;
c->req.hdr.blk_cnt = creq->nr_sectors;
- c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
+ c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
spin_lock_irq(&q->queue_lock);
next_rq = list_entry(next, struct request, queuelist);
- BUG_ON(!next_rq->inactive);
+ BUG_ON(next_rq->flags & REQ_STARTED);
/*
* if the device is different (not a normal case) just check if
/*
* can we safely merge with this request?
*/
-inline int elv_rq_merge_ok(request_queue_t *q, struct request *rq,
- struct bio *bio)
+inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
- if (bio_data_dir(bio) == rq->cmd) {
- if (rq->rq_dev == bio->bi_dev && !rq->waiting
- && !rq->special && rq->inactive)
- return 1;
- }
+ /*
+ * different data direction or already started, don't merge
+ */
+ if (bio_data_dir(bio) != rq_data_dir(rq))
+ return 0;
+ if (rq->flags & REQ_NOMERGE)
+ return 0;
+
+ /*
+ * same device and no special stuff set, merge is ok
+ */
+ if (rq->rq_dev == bio->bi_dev && !rq->waiting && !rq->special)
+ return 1;
return 0;
}
*/
if (__rq->elevator_sequence-- <= 0)
break;
-
- if (unlikely(__rq->waiting || __rq->special))
- continue;
- if (unlikely(!__rq->inactive))
+ if (__rq->flags & (REQ_BARRIER | REQ_STARTED))
break;
+
if (!*req && bio_rq_in_between(bio, __rq, &q->queue_head))
*req = __rq;
- if (!elv_rq_merge_ok(q, __rq, bio))
+ if (!elv_rq_merge_ok(__rq, bio))
continue;
if (__rq->elevator_sequence < count)
prefetch(list_entry_rq(entry->prev));
- if (unlikely(__rq->waiting || __rq->special))
- continue;
- if (unlikely(!__rq->inactive))
+ if (__rq->flags & (REQ_BARRIER | REQ_STARTED))
break;
- if (!elv_rq_merge_ok(q, __rq, bio))
+
+ if (!elv_rq_merge_ok(__rq, bio))
continue;
/*
DPRINT("request list destroyed in floppy request done\n");
} else {
- if (CURRENT->cmd == WRITE) {
+ if (rq_data_dir(CURRENT) == WRITE) {
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK |
FD_RAW_NEED_SEEK;
raw_cmd->cmd_count = NR_RW;
- if (CURRENT->cmd == READ){
+ if (rq_data_dir(CURRENT) == READ) {
raw_cmd->flags |= FD_RAW_READ;
COMMAND = FM_MODE(_floppy,FD_READ);
- } else if (CURRENT->cmd == WRITE){
+ } else if (rq_data_dir(CURRENT) == WRITE){
raw_cmd->flags |= FD_RAW_WRITE;
COMMAND = FM_MODE(_floppy,FD_WRITE);
} else {
if (usage_count == 0) {
printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT);
- printk("sect=%ld cmd=%d\n", CURRENT->sector, CURRENT->cmd);
+ printk("sect=%ld flags=%lx\n", CURRENT->sector, CURRENT->flags);
return;
}
if (fdc_busy){
char buf[64];
int len, n;
- len = sprintf(page, "major minor #blocks name\n\n");
+ len = sprintf(page, "major minor #blocks start_sect nr_sects "
+ "name\n\n");
read_lock(&gendisk_lock);
for (gp = gendisk_head; gp; gp = gp->next) {
for (n = 0; n < (gp->nr_real << gp->minor_shift); n++) {
continue;
len += snprintf(page + len, 63,
- "%4d %4d %10d %s\n",
+ "%4d %4d %10d %10lu %10lu %s\n",
gp->major, n, gp->sizes[n],
+ gp->part[n].start_sect,
+ gp->part[n].nr_sects,
disk_name(gp, n, buf));
if (len < offset)
offset -= len, len = 0;
__u8 reserved;
} rhdr_t;
-#define SG_MAX 32
+#define SG_MAX 31
typedef struct {
rhdr_t hdr;
sg_t sg[SG_MAX];
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <linux/compiler.h>
+#include <scsi/scsi.h>
#include <asm/system.h>
#include <asm/io.h>
q->seg_boundary_mask = mask;
}
+/*
+ * Printable names for the request flag bits, indexed by bit number.
+ * NOTE(review): order assumed to match the __REQ_* enum in blkdev.h --
+ * keep the two in sync.
+ */
+static char *rq_flags[] = { "REQ_RW", "REQ_RW_AHEAD", "REQ_BARRIER",
+			    "REQ_CMD", "REQ_NOMERGE", "REQ_STARTED",
+			    "REQ_DONTPREP", "REQ_DRIVE_CMD", "REQ_DRIVE_TASK",
+			    "REQ_PC", "REQ_SENSE", "REQ_SPECIAL" };
+
+/*
+ * blk_dump_rq_flags - debugging aid: print @msg, the request's device,
+ * the symbolic name of every flag bit that is set, and -- for normal
+ * read/write commands (REQ_CMD) -- the sector/count fields.
+ */
+void blk_dump_rq_flags(struct request *rq, char *msg)
+{
+	int bit;
+
+	printk("%s: dev %x: ", msg, rq->rq_dev);
+	bit = 0;
+	do {
+		if (rq->flags & (1 << bit))
+			printk("%s ", rq_flags[bit]);
+		bit++;
+	} while (bit < __REQ_NR_BITS);
+
+	if (rq->flags & REQ_CMD)
+		printk("sector %lu, nr/cnr %lu/%u\n", rq->sector,
+						       rq->nr_sectors,
+						       rq->current_nr_sectors);
+
+	printk("\n");
+}
+
+/*
+ * ll_10byte_cmd_build - standard prep_rq_fn: translate a filesystem
+ * read/write request into a 10-byte READ_10/WRITE_10 command, with the
+ * LBA and transfer length scaled to the device's hardware sector size.
+ *
+ * Requests without REQ_CMD set are left untouched.  Always returns 0.
+ *
+ * NOTE(review): assumes hard_sect >= 512 -- (hard_sect >> 9) would be
+ * zero (divide-by-zero) for smaller sector sizes; confirm callers.
+ */
+static int ll_10byte_cmd_build(request_queue_t *q, struct request *rq)
+{
+	int hard_sect = get_hardsect_size(rq->rq_dev);
+	sector_t block = rq->hard_sector / (hard_sect >> 9);
+	unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+
+	if (!(rq->flags & REQ_CMD))
+		return 0;
+
+	if (rq_data_dir(rq) == READ)
+		rq->cmd[0] = READ_10;
+	else
+		rq->cmd[0] = WRITE_10;
+
+	rq->cmd[1] = 0;
+
+	/*
+	 * fill in lba
+	 */
+	rq->cmd[2] = (block >> 24) & 0xff;
+	rq->cmd[3] = (block >> 16) & 0xff;
+	rq->cmd[4] = (block >> 8) & 0xff;
+	rq->cmd[5] = block & 0xff;
+	rq->cmd[6] = 0;
+
+	/*
+	 * and transfer length
+	 */
+	rq->cmd[7] = (blocks >> 8) & 0xff;
+	rq->cmd[8] = blocks & 0xff;
+
+	return 0;
+}
+
/*
* can we merge the two segments, or do we need to start a new one?
*/
unsigned long long lastend;
struct bio_vec *bvec;
struct bio *bio;
- int nsegs, i, cluster;
+ int nsegs, i, cluster, j;
nsegs = 0;
bio = rq->bio;
/*
* for each bio in rq
*/
+ j = 0;
rq_for_each_bio(bio, rq) {
+ j++;
/*
* for each segment in bio
*/
sg[nsegs - 1].length += nbytes;
} else {
new_segment:
- if (nsegs >= q->max_segments) {
+ if (nsegs > q->max_segments) {
printk("map: %d >= %d\n", nsegs, q->max_segments);
+ printk("map %d, %d, bio_sectors %d, vcnt %d\n", i, j, bio_sectors(bio), bio->bi_vcnt);
BUG();
}
* the standard queue merge functions, can be overridden with device
* specific ones if so desired
*/
-static inline int ll_new_segment(request_queue_t *q, struct request *req)
+static inline int ll_new_segment(request_queue_t *q, struct request *req,
+ struct bio *bio)
{
- if (req->nr_segments < q->max_segments) {
- req->nr_segments++;
+ if (req->nr_segments + bio->bi_vcnt < q->max_segments) {
+ req->nr_segments += bio->bi_vcnt;
return 1;
}
return 0;
if (blk_same_segment(q, req->biotail, bio))
return 1;
- return ll_new_segment(q, req);
+ return ll_new_segment(q, req, bio);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
if (blk_same_segment(q, bio, req->bio))
return 1;
- return ll_new_segment(q, req);
+ return ll_new_segment(q, req, bio);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
* This is called with interrupts off and no requests on the queue.
* (and with the request spinlock acquired)
*/
-static void blk_plug_device(request_queue_t *q)
+void blk_plug_device(request_queue_t *q)
{
/*
* common case
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
q->merge_requests_fn = ll_merge_requests_fn;
+ q->prep_rq_fn = ll_10byte_cmd_build;
q->plug_tq.sync = 0;
q->plug_tq.routine = &generic_unplug_device;
q->plug_tq.data = q;
rq = blkdev_free_rq(&rl->free);
list_del(&rq->queuelist);
rl->count--;
- rq->inactive = 1;
+ rq->flags = 0;
rq->rq_status = RQ_ACTIVE;
rq->special = NULL;
rq->q = q;
+ rq->rl = rl;
}
return rq;
return rq;
}
+/*
+ * blk_get_request - allocate a request from @q's free list for direction
+ * @rw (must be READ or WRITE).  Returns NULL when none is available,
+ * unless @gfp_mask contains __GFP_WAIT, in which case the caller sleeps
+ * until a request can be handed out.
+ */
+struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
+{
+	struct request *rq;
+
+	BUG_ON(rw != READ && rw != WRITE);
+
+	rq = get_request(q, rw);
+
+	if (!rq && (gfp_mask & __GFP_WAIT))
+		rq = get_request_wait(q, rw);
+
+	return rq;
+}
+
+/*
+ * blk_put_request - hand a request obtained with blk_get_request() back
+ * to its free list.
+ */
+void blk_put_request(struct request *rq)
+{
+	blkdev_release_request(rq);
+}
+
/* RO fail safe mechanism */
static long ro_bits[MAX_BLKDEV][8];
else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
-void drive_stat_acct (kdev_t dev, int rw, unsigned long nr_sectors, int new_io)
+void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
- unsigned int major = MAJOR(dev);
+ unsigned int major = MAJOR(rq->rq_dev);
+ int rw = rq_data_dir(rq);
unsigned int index;
- index = disk_index(dev);
+ index = disk_index(rq->rq_dev);
if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
return;
static inline void add_request(request_queue_t * q, struct request * req,
struct list_head *insert_here)
{
- drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
+ drive_stat_acct(req, req->nr_sectors, 1);
- {
+ /*
+ * debug stuff...
+ */
+ if (insert_here == &q->queue_head) {
struct request *__rq = __elv_next_request(q);
- if (__rq && !__rq->inactive && insert_here == &q->queue_head)
- BUG();
+ BUG_ON(__rq && (__rq->flags & REQ_STARTED));
}
/*
*/
void blkdev_release_request(struct request *req)
{
- request_queue_t *q = req->q;
- int rw = req->cmd;
+ struct request_list *rl = req->rl;
req->rq_status = RQ_INACTIVE;
req->q = NULL;
+ req->rl = NULL;
/*
* Request may not have originated from ll_rw_blk. if not,
- * assume it has free buffers and check waiters
+ * it didn't come out of our reserved rq pools
*/
- if (q) {
- list_add(&req->queuelist, &q->rq[rw].free);
- if (++q->rq[rw].count >= batch_requests
- && waitqueue_active(&q->rq[rw].wait))
- wake_up(&q->rq[rw].wait);
+ if (rl) {
+ list_add(&req->queuelist, &rl->free);
+
+ if (++rl->count >= batch_requests &&waitqueue_active(&rl->wait))
+ wake_up(&rl->wait);
}
}
{
struct request *next = blkdev_next_request(req);
+ /*
+ * not a rw command
+ */
+ if (!(next->flags & REQ_CMD))
+ return;
+
+ /*
+	 * not contiguous
+ */
if (req->sector + req->nr_sectors != next->sector)
return;
- if (req->cmd != next->cmd
+ /*
+ * don't touch NOMERGE rq, or one that has been started by driver
+ */
+ if (next->flags & (REQ_NOMERGE | REQ_STARTED))
+ return;
+
+ if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_dev != next->rq_dev
|| req->nr_sectors + next->nr_sectors > q->max_sectors
- || next->waiting || next->special || !next->inactive)
+ || next->waiting || next->special)
return;
/*
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
- next->bio = next->biotail = NULL;
-
req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
blkdev_release_request(next);
spin_lock_prefetch(&q->queue_lock);
latency = elevator_request_latency(elevator, rw);
-
- barrier = test_bit(BIO_BARRIER, &bio->bi_flags);
+ barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw);
again:
req = NULL;
spin_lock_irq(&q->queue_lock);
- /*
- * barrier write must not be passed - so insert with 0 latency at
- * the back of the queue and invalidate the entire existing merge hash
- * for this device
- */
- if (barrier && !freereq)
- latency = 0;
-
insert_here = head->prev;
if (blk_queue_empty(q) || barrier) {
blk_plug_device(q);
goto get_rq;
-#if 0
- } else if (test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
- head = head->next;
-#else
} else if ((req = __elv_next_request(q))) {
- if (!req->inactive)
+ if (req->flags & REQ_STARTED)
head = head->next;
req = NULL;
-#endif
}
el_ret = elevator->elevator_merge_fn(q, &req, head, bio);
switch (el_ret) {
case ELEVATOR_BACK_MERGE:
- if (&req->queuelist == head && !req->inactive)
- BUG();
+ BUG_ON(req->flags & REQ_STARTED);
+ BUG_ON(req->flags & REQ_NOMERGE);
if (!q->back_merge_fn(q, req, bio))
break;
elevator->elevator_merge_cleanup_fn(q, req, nr_sectors);
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
- drive_stat_acct(req->rq_dev, req->cmd, nr_sectors, 0);
+ drive_stat_acct(req, nr_sectors, 0);
attempt_back_merge(q, req);
goto out;
case ELEVATOR_FRONT_MERGE:
- if (&req->queuelist == head && !req->inactive)
- BUG();
+ BUG_ON(req->flags & REQ_STARTED);
+ BUG_ON(req->flags & REQ_NOMERGE);
if (!q->front_merge_fn(q, req, bio))
break;
elevator->elevator_merge_cleanup_fn(q, req, nr_sectors);
req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
- drive_stat_acct(req->rq_dev, req->cmd, nr_sectors, 0);
+ drive_stat_acct(req, nr_sectors, 0);
attempt_front_merge(q, head, req);
goto out;
/*
* READA bit set
*/
- if (bio->bi_rw & RWA_MASK) {
+ if (bio->bi_rw & (1 << BIO_RW_AHEAD)) {
set_bit(BIO_RW_BLOCK, &bio->bi_flags);
goto end_io;
}
* fill up the request-info, and add it to the queue
*/
req->elevator_sequence = latency;
- req->cmd = rw;
+
+ /*
+ * first three bits are identical in rq->flags and bio->bi_rw,
+ * see bio.h and blkdev.h
+ */
+ req->flags = (bio->bi_rw & 7) | REQ_CMD;
+
+ /*
+ * REQ_BARRIER implies no merging, but lets make it explicit
+ */
+ if (barrier)
+ req->flags |= (REQ_BARRIER | REQ_NOMERGE);
+
req->errors = 0;
req->hard_sector = req->sector = sector;
req->hard_nr_sectors = req->nr_sectors = nr_sectors;
req->rq_dev = bio->bi_dev;
add_request(q, req, insert_here);
out:
- if (freereq) {
- freereq->bio = freereq->biotail = NULL;
+ if (freereq)
blkdev_release_request(freereq);
- }
-
spin_unlock_irq(&q->queue_lock);
return 0;
}
/*
- * uh oh, need to split this bio... not implemented yet
+ * this needs to be handled by q->make_request_fn, to just
+ * setup a part of the bio in the request to enable easy
+ * multiple passing
*/
- if (bio_sectors(bio) > q->max_sectors)
- BUG();
+ BUG_ON(bio_sectors(bio) > q->max_sectors);
/*
* If this device has partitions, remap block n
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
-
return 0;
}
int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
{
struct bio *bio, *nxt;
- int nsect;
+ int nsect, total_nsect = 0;
req->errors = 0;
if (!uptodate)
nsect = bio_iovec(bio)->bv_len >> 9;
nr_sectors -= nsect;
-
- nxt = bio->bi_next;
- bio->bi_next = NULL;
- if (!bio_endio(bio, uptodate, nsect))
- req->bio = nxt;
- else
- bio->bi_next = nxt;
+ total_nsect += nsect;
+
+ if (++bio->bi_idx >= bio->bi_vcnt) {
+ nxt = bio->bi_next;
+ if (!bio_endio(bio, uptodate, total_nsect)) {
+ total_nsect = 0;
+ req->bio = nxt;
+ } else
+ BUG();
+ }
if ((bio = req->bio) != NULL) {
req->hard_sector += nsect;
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
+EXPORT_SYMBOL(blk_dump_rq_flags);
void nbd_send_req(struct socket *sock, struct request *req)
{
- int result;
+ int result, rw, i, flags;
struct nbd_request request;
unsigned long size = req->nr_sectors << 9;
DEBUG("NBD: sending control, ");
request.magic = htonl(NBD_REQUEST_MAGIC);
- request.type = htonl(req->cmd);
+ request.type = htonl(req->flags);
request.from = cpu_to_be64( (u64) req->sector << 9);
request.len = htonl(size);
memcpy(request.handle, &req, sizeof(req));
- result = nbd_xmit(1, sock, (char *) &request, sizeof(request), req->cmd == WRITE ? MSG_MORE : 0);
+ rw = rq_data_dir(req);
+
+ result = nbd_xmit(1, sock, (char *) &request, sizeof(request), rw & WRITE ? MSG_MORE : 0);
if (result <= 0)
FAIL("Sendmsg failed for control.");
- if (req->cmd == WRITE) {
- struct bio *bio = req->bio;
- DEBUG("data, ");
- do {
- result = nbd_xmit(1, sock, bio_data(bio), bio->bi_size, bio->bi_next == NULL ? 0 : MSG_MORE);
- if (result <= 0)
- FAIL("Send data failed.");
- bio = bio->bi_next;
- } while(bio);
+ if (rw & WRITE) {
+ struct bio *bio;
+ /*
+ * we are really probing at internals to determine
+ * whether to set MSG_MORE or not...
+ */
+ rq_for_each_bio(bio, req) {
+ struct bio_vec *bvec;
+ bio_for_each_segment(bvec, bio, i) {
+ flags = 0;
+ if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
+ flags = MSG_MORE;
+ DEBUG("data, ");
+ result = nbd_xmit(1, sock, page_address(bvec->bv_page) + bvec->bv_offset, bvec->bv_len, flags);
+ if (result <= 0)
+ FAIL("Send data failed.");
+ }
+ }
}
return;
HARDFAIL("Not enough magic.");
if (ntohl(reply.error))
FAIL("Other side returned error.");
- if (req->cmd == READ) {
+ if (rq_data_dir(req) == READ) {
struct bio *bio = req->bio;
DEBUG("data, ");
do {
if (dev >= MAX_NBD)
FAIL("Minor too big."); /* Probably can not happen */
#endif
+ if (!(req->flags & REQ_CMD))
+ goto error_out;
+
lo = &nbd_dev[dev];
if (!lo->file)
FAIL("Request when not-ready.");
- if ((req->cmd == WRITE) && (lo->flags & NBD_READ_ONLY))
+ if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_READ_ONLY))
FAIL("Write on read-only");
#ifdef PARANOIA
if (lo->magic != LO_MAGIC)
switch (cmd) {
case NBD_DISCONNECT:
printk("NBD_DISCONNECT\n") ;
- sreq.cmd=2 ; /* shutdown command */
+		sreq.flags = REQ_SPECIAL; /* FIXME: interpret as shutdown cmd */
if (!lo->sock) return -EINVAL ;
nbd_send_req(lo->sock,&sreq) ;
return 0 ;
while (1) {
if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
INIT_REQUEST;
- if (CURRENT->cmd == READ) {
+ if (rq_data_dir(CURRENT) == READ) {
unit = MINOR(CURRENT->rq_dev);
if (unit != pcd_unit) {
pcd_bufblk = -1;
goto repeat;
}
- pd_cmd = CURRENT->cmd;
+ pd_cmd = rq_data_dir(CURRENT);
pd_buf = CURRENT->buffer;
pd_retries = 0;
/* paranoia */
if (QUEUE_EMPTY ||
- (CURRENT->cmd != pd_cmd) ||
+ (rq_data_dir(CURRENT) != pd_cmd) ||
(MINOR(CURRENT->rq_dev) != pd_dev) ||
(CURRENT->rq_status == RQ_INACTIVE) ||
(CURRENT->sector != pd_block))
goto repeat;
}
- pf_cmd = CURRENT->cmd;
+ pf_cmd = rq_data_dir(CURRENT);
pf_buf = CURRENT->buffer;
pf_retries = 0;
/* paranoia */
if (QUEUE_EMPTY ||
- (CURRENT->cmd != pf_cmd) ||
+ (rq_data_dir(CURRENT) != pf_cmd) ||
(DEVICE_NR(CURRENT->rq_dev) != pf_unit) ||
(CURRENT->rq_status == RQ_INACTIVE) ||
(CURRENT->sector != pf_block))
ioctl: rd_ioctl,
};
-#ifdef MODULE
/* Before freeing the module, invalidate all of the protected buffers! */
static void __exit rd_cleanup (void)
{
unregister_blkdev( MAJOR_NR, "ramdisk" );
blk_clear(MAJOR_NR);
}
-#endif
/* This is the registration and initialization section of the RAM disk driver */
int __init rd_init (void)
/* Define this to remove _all_ the debugging messages */
/* #define ERRLOGMASK CD_NOTHING */
-#define ERRLOGMASK (CD_WARNING)
+#define ERRLOGMASK CD_WARNING
/* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
/* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
/* stuff the sense request in front of our current request */
rq = &info->request_sense_request;
ide_init_drive_cmd(rq);
- rq->cmd = REQUEST_SENSE_COMMAND;
- rq->buffer = (char *) pc;
+ rq->flags = REQ_SENSE;
+ rq->special = (char *) pc;
rq->waiting = wait;
(void) ide_do_drive_cmd(drive, rq, ide_preempt);
}
{
struct request *rq = HWGROUP(drive)->rq;
- if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate) {
- struct packet_command *pc = (struct packet_command *) rq->buffer;
+ if ((rq->flags & REQ_SENSE) && uptodate) {
+ struct packet_command *pc = (struct packet_command *) rq->special;
cdrom_analyze_sense_data(drive,
(struct packet_command *) pc->sense,
(struct request_sense *) (pc->buffer - pc->c[4]));
}
- if (rq->cmd == READ || rq->cmd == WRITE)
- if (!rq->current_nr_sectors)
- uptodate = 1;
- ide_end_request (uptodate, HWGROUP(drive));
+ if ((rq->flags & REQ_CMD) && !rq->current_nr_sectors)
+ uptodate = 1;
+
+ ide_end_request(uptodate, HWGROUP(drive));
}
return 1;
}
- if (rq->cmd == REQUEST_SENSE_COMMAND) {
+ if (rq->flags & REQ_SENSE) {
/* We got an error trying to get sense info
from the drive (probably while trying
to recover from a former error). Just give up. */
- pc = (struct packet_command *) rq->buffer;
+ pc = (struct packet_command *) rq->special;
pc->stat = 1;
cdrom_end_request (1, drive);
*startstop = ide_error (drive, "request sense failure", stat);
return 1;
-
- } else if (rq->cmd == PACKET_COMMAND) {
+ } else if (rq->flags & REQ_PC) {
/* All other functions, except for READ. */
struct completion *wait = NULL;
- pc = (struct packet_command *) rq->buffer;
+ pc = (struct packet_command *) rq->special;
/* Check for tray open. */
if (sense_key == NOT_READY) {
if ((stat & ERR_STAT) != 0)
cdrom_queue_request_sense(drive, wait, pc->sense, pc);
- } else {
+ } else if (rq->flags & REQ_CMD) {
/* Handle errors from READ and WRITE requests. */
if (sense_key == NOT_READY) {
queue a request sense command. */
if ((stat & ERR_STAT) != 0)
cdrom_queue_request_sense(drive, NULL, NULL, NULL);
- }
+ } else
+ blk_dump_rq_flags(rq, "ide-cd bad flags");
/* Retry, or handle the next request. */
*startstop = ide_stopped;
static int cdrom_timer_expiry(ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *) rq->buffer;
unsigned long wait = 0;
/*
* this, but not all commands/drives support that. Let
* ide_timer_expiry keep polling us for these.
*/
- switch (pc->c[0]) {
+ switch (rq->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
wait = 0;
break;
}
+
return wait;
}
(65534 / CD_FRAMESIZE) : 65535);
/* Set up the command */
- memset (&pc.c, 0, sizeof (pc.c));
- pc.c[0] = GPCMD_READ_10;
- pc.c[7] = (nframes >> 8);
- pc.c[8] = (nframes & 0xff);
- put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]);
+ memcpy(pc.c, rq->cmd, sizeof(pc.c));
pc.timeout = WAIT_CMD;
/* Send the command to the drive and return. */
sector -= nskip;
frame = sector / SECTORS_PER_FRAME;
- memset (&pc.c, 0, sizeof (pc.c));
+ memset(rq->cmd, 0, sizeof(rq->cmd));
pc.c[0] = GPCMD_SEEK;
put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]);
return cdrom_start_packet_command (drive, 0, cdrom_start_seek_continuation);
}
-/* Fix up a possibly partially-processed request so that we can
- start it over entirely */
+/*
+ * Fix up a possibly partially-processed request so that we can
+ * start it over entirely -- remember to call prep_rq_fn again since we
+ * may have changed the layout
+ */
static void restore_request (struct request *rq)
{
if (rq->buffer != bio_data(rq->bio)) {
rq->hard_cur_sectors = rq->current_nr_sectors = bio_sectors(rq->bio);
rq->hard_nr_sectors = rq->nr_sectors;
rq->hard_sector = rq->sector;
+ rq->q->prep_rq_fn(rq->q, rq);
}
/*
{
struct cdrom_info *info = drive->driver_data;
struct request *rq = HWGROUP(drive)->rq;
- int minor = MINOR (rq->rq_dev);
-
- /* If the request is relative to a partition, fix it up to refer to the
- absolute address. */
- if (minor & PARTN_MASK) {
- rq->sector = block;
- minor &= ~PARTN_MASK;
- rq->rq_dev = MKDEV(MAJOR(rq->rq_dev), minor);
- }
- /* We may be retrying this request after an error. Fix up
- any weirdness which might be present in the request packet. */
restore_request(rq);
/* Satisfy whatever we can of this request from our cached sector. */
{
int ireason, len, stat, thislen;
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *)rq->buffer;
+ struct packet_command *pc = (struct packet_command *) rq->special;
ide_startstop_t startstop;
/* Check for errors. */
static ide_startstop_t cdrom_do_pc_continuation (ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *)rq->buffer;
+ struct packet_command *pc = (struct packet_command *) rq->special;
if (!pc->timeout)
pc->timeout = WAIT_CMD;
{
int len;
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *)rq->buffer;
+ struct packet_command *pc = (struct packet_command *) rq->special;
struct cdrom_info *info = drive->driver_data;
info->dma = 0;
/* Start of retry loop. */
do {
ide_init_drive_cmd (&req);
- req.cmd = PACKET_COMMAND;
- req.buffer = (char *)pc;
+ req.flags = REQ_PC;
+ req.special = (char *) pc;
if (ide_do_drive_cmd (drive, &req, ide_wait)) {
printk("%s: do_drive_cmd returned stat=%02x,err=%02x\n",
drive->name, req.buffer[0], req.buffer[1]);
nframes = rq->nr_sectors >> 2;
frame = rq->sector >> 2;
- memset(&pc.c, 0, sizeof(pc.c));
- /*
- * we might as well use WRITE_12, but none of the device I have
- * support the streaming feature anyway, so who cares.
- */
- pc.c[0] = GPCMD_WRITE_10;
+ memcpy(pc.c, rq->cmd, sizeof(pc.c));
#if 0 /* the immediate bit */
pc.c[1] = 1 << 3;
#endif
- pc.c[7] = (nframes >> 8) & 0xff;
- pc.c[8] = nframes & 0xff;
- put_unaligned(cpu_to_be32(frame), (unsigned int *)&pc.c[2]);
pc.timeout = 2 * WAIT_CMD;
return cdrom_transfer_packet_command(drive, &pc, cdrom_write_intr);
return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont);
}
+/*
+ * cdrom_do_block_pc - service a REQ_BLOCK_PC request by wrapping the
+ * queued cmd bytes in a struct packet_command and handing it to
+ * cdrom_do_packet_command().  Bumps rq->errors if the command failed.
+ *
+ * NOTE(review): @pc is stack-allocated and rq->special points into it,
+ * so this relies on the command being fully processed before return --
+ * confirm the request really is handled synchronously here.
+ */
+static int cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
+{
+	struct packet_command pc;
+	ide_startstop_t startstop;
+
+	memset(&pc, 0, sizeof(pc));
+	memcpy(pc.c, rq->cmd, sizeof(pc.c));
+	pc.quiet = 1;
+	pc.timeout = 60 * HZ;
+	rq->special = (char *) &pc;
+
+	startstop = cdrom_do_packet_command(drive);
+	if (pc.stat)
+		rq->errors++;
+
+	return startstop;
+}
+
+
/****************************************************************************
* cdrom driver request routine.
*/
ide_startstop_t action;
struct cdrom_info *info = drive->driver_data;
- switch (rq->cmd) {
- case WRITE:
- case READ: {
- if (CDROM_CONFIG_FLAGS(drive)->seeking) {
- unsigned long elpased = jiffies - info->start_seek;
- int stat = GET_STAT();
-
- if ((stat & SEEK_STAT) != SEEK_STAT) {
- if (elpased < IDECD_SEEK_TIMEOUT) {
- ide_stall_queue(drive, IDECD_SEEK_TIMER);
- return ide_stopped;
- }
- printk ("%s: DSC timeout\n", drive->name);
+ if (rq->flags & REQ_CMD) {
+ if (CDROM_CONFIG_FLAGS(drive)->seeking) {
+ unsigned long elpased = jiffies - info->start_seek;
+ int stat = GET_STAT();
+
+ if ((stat & SEEK_STAT) != SEEK_STAT) {
+ if (elpased < IDECD_SEEK_TIMEOUT) {
+ ide_stall_queue(drive, IDECD_SEEK_TIMER);
+ return ide_stopped;
}
- CDROM_CONFIG_FLAGS(drive)->seeking = 0;
+ printk ("%s: DSC timeout\n", drive->name);
}
- if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap)
- action = cdrom_start_seek (drive, block);
- else {
- if (rq->cmd == READ)
- action = cdrom_start_read(drive, block);
- else
- action = cdrom_start_write(drive, rq);
- }
- info->last_block = block;
- return action;
- }
-
- case PACKET_COMMAND:
- case REQUEST_SENSE_COMMAND: {
- return cdrom_do_packet_command(drive);
- }
-
- case RESET_DRIVE_COMMAND: {
- cdrom_end_request(1, drive);
- return ide_do_reset(drive);
+ CDROM_CONFIG_FLAGS(drive)->seeking = 0;
}
-
- default: {
- printk("ide-cd: bad cmd %d\n", rq->cmd);
- cdrom_end_request(0, drive);
- return ide_stopped;
+ if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap)
+ action = cdrom_start_seek (drive, block);
+ else {
+ if (rq_data_dir(rq) == READ)
+ action = cdrom_start_read(drive, block);
+ else
+ action = cdrom_start_write(drive, rq);
}
+ info->last_block = block;
+ return action;
+ } else if (rq->flags & (REQ_PC | REQ_SENSE)) {
+ return cdrom_do_packet_command(drive);
+ } else if (rq->flags & REQ_SPECIAL) {
+ /*
+ * right now this can only be a reset...
+ */
+ cdrom_end_request(1, drive);
+ return ide_do_reset(drive);
+ } else if (rq->flags & REQ_BLOCK_PC) {
+ return cdrom_do_block_pc(drive, rq);
}
+
+ blk_dump_rq_flags(rq, "ide-cd bad flags");
+ cdrom_end_request(0, drive);
+ return ide_stopped;
}
return cgc->stat;
}
+
static
int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi,
unsigned int cmd, unsigned long arg)
int ret;
ide_init_drive_cmd (&req);
- req.cmd = RESET_DRIVE_COMMAND;
+ req.flags = REQ_SPECIAL;
ret = ide_do_drive_cmd(drive, &req, ide_wait);
/*
*/
set_device_ro(MKDEV(HWIF(drive)->major, minor), 1);
set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
+ blk_queue_hardsect_size(&drive->queue, CD_FRAMESIZE);
drive->special.all = 0;
drive->ready_stat = 0;
#endif /* CONFIG_BLK_DEV_PDC4030 */
#ifdef DEBUG
printk("%s: %sing: LBAsect=%ld, sectors=%ld, buffer=0x%08lx\n",
- drive->name, (rq->cmd==READ)?"read":"writ",
+ drive->name, (rq_data_dir(rq)==READ)?"read":"writ",
block, rq->nr_sectors, (unsigned long) rq->buffer);
#endif
OUT_BYTE(block,IDE_SECTOR_REG);
OUT_BYTE(head|drive->select.all,IDE_SELECT_REG);
#ifdef DEBUG
printk("%s: %sing: CHS=%d/%d/%d, sectors=%ld, buffer=0x%08lx\n",
- drive->name, (rq->cmd==READ)?"read":"writ", cyl,
+ drive->name, (rq_data_dir(rq)==READ)?"read":"writ", cyl,
head, sect, rq->nr_sectors, (unsigned long) rq->buffer);
#endif
}
return do_pdc4030_io (drive, rq);
}
#endif /* CONFIG_BLK_DEV_PDC4030 */
- if (rq->cmd == READ) {
+ if (rq_data_dir(rq) == READ) {
#ifdef CONFIG_BLK_DEV_IDEDMA
if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_read, drive)))
return ide_started;
OUT_BYTE(drive->mult_count ? WIN_MULTREAD : WIN_READ, IDE_COMMAND_REG);
return ide_started;
}
- if (rq->cmd == WRITE) {
+ if (rq_data_dir(rq) == WRITE) {
ide_startstop_t startstop;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_write, drive)))
}
return ide_started;
}
- printk(KERN_ERR "%s: bad command: %d\n", drive->name, rq->cmd);
+ printk(KERN_ERR "%s: bad command: %lx\n", drive->name, rq->flags);
ide_end_request(0, HWGROUP(drive));
return ide_stopped;
}
if (nents > rq->nr_segments)
printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
- if (rq->cmd == READ)
+ if (rq_data_dir(rq) == READ)
hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
else
hwif->sg_dma_direction = PCI_DMA_TODEVICE;
#include <linux/ide.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/completion.h>
+#include <linux/cdrom.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
spin_lock_irqsave(&ide_lock, flags);
rq = hwgroup->rq;
- if (rq->inactive)
- BUG();
+ BUG_ON(!(rq->flags & REQ_STARTED));
/*
* small hack to eliminate locking from ide_end_request to grab
spin_lock_irqsave(&ide_lock, flags);
rq = HWGROUP(drive)->rq;
- if (rq->cmd == IDE_DRIVE_CMD) {
+ if (rq->flags & REQ_DRIVE_CMD) {
byte *args = (byte *) rq->buffer;
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
if (args) {
args[1] = err;
args[2] = IN_BYTE(IDE_NSECTOR_REG);
}
- } else if (rq->cmd == IDE_DRIVE_TASK) {
+ } else if (rq->flags & REQ_DRIVE_TASK) {
byte *args = (byte *) rq->buffer;
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
if (args) {
}
spin_lock(DRIVE_LOCK(drive));
- if (rq->inactive)
- BUG();
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
end_that_request_last(rq);
if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
return ide_stopped;
/* retry only "normal" I/O: */
- if (rq->cmd == IDE_DRIVE_CMD || rq->cmd == IDE_DRIVE_TASK) {
+ if (!(rq->flags & REQ_CMD)) {
rq->errors = 1;
ide_end_drive_cmd(drive, stat, err);
return ide_stopped;
else if (err & TRK0_ERR) /* help it find track zero */
rq->errors |= ERROR_RECAL;
}
- if ((stat & DRQ_STAT) && rq->cmd != WRITE)
+ if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ)
try_to_flush_leftover_data(drive);
}
if (GET_STAT() & (BUSY_STAT|DRQ_STAT))
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq)
{
byte *args = rq->buffer;
- if (args && rq->cmd == IDE_DRIVE_TASK) {
+ if (args && (rq->flags & REQ_DRIVE_TASK)) {
byte sel;
#ifdef DEBUG
printk("%s: DRIVE_TASK_CMD data=x%02x cmd=0x%02x fr=0x%02x ns=0x%02x sc=0x%02x lcyl=0x%02x hcyl=0x%02x sel=0x%02x\n",
unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
ide_hwif_t *hwif = HWIF(drive);
- if (rq->inactive)
- BUG();
+ BUG_ON(!(rq->flags & REQ_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
block = rq->sector;
/* Strange disk manager remap */
- if ((rq->cmd == READ || rq->cmd == WRITE) &&
+ if ((rq->flags & REQ_CMD) &&
(drive->media == ide_disk || drive->media == ide_floppy)) {
block += drive->sect0;
}
return startstop;
}
if (!drive->special.all) {
- if (rq->cmd == IDE_DRIVE_CMD || rq->cmd == IDE_DRIVE_TASK) {
+ if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK))
return execute_drive_cmd(drive, rq);
- }
+
if (drive->driver != NULL) {
return (DRIVER(drive)->do_request(drive, rq, block));
}
void ide_init_drive_cmd (struct request *rq)
{
memset(rq, 0, sizeof(*rq));
- rq->cmd = IDE_DRIVE_CMD;
+ rq->flags = REQ_DRIVE_CMD;
}
/*
struct request rq;
ide_init_drive_cmd(&rq);
- rq.cmd = IDE_DRIVE_TASK;
+ rq.flags = REQ_DRIVE_TASK;
rq.buffer = buf;
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
case BLKBSZSET:
return blk_ioctl(inode->i_rdev, cmd, arg);
+ /*
+ * uniform packet command handling
+ */
+ case CDROMEJECT:
+ case CDROMCLOSETRAY:
+ return block_ioctl(inode->i_rdev, cmd, arg);
+
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
struct capincci *nccip;
unsigned int minor;
- __u16 applid;
- __u32 ncci;
- __u16 datahandle;
- __u16 msgid;
+ u16 applid;
+ u32 ncci;
+ u16 datahandle;
+ u16 msgid;
struct file *file;
struct tty_struct *tty;
/* transmit path */
struct datahandle_queue {
struct datahandle_queue *next;
- __u16 datahandle;
+ u16 datahandle;
} *ackqueue;
int nack;
struct capincci {
struct capincci *next;
- __u32 ncci;
+ u32 ncci;
struct capidev *cdev;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor *minorp;
struct capidev {
struct capidev *next;
struct file *file;
- __u16 applid;
- __u16 errcode;
+ u16 applid;
+ u16 errcode;
unsigned int minor;
unsigned userflags;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
/* -------- datahandles --------------------------------------------- */
-int capincci_add_ack(struct capiminor *mp, __u16 datahandle)
+static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
{
struct datahandle_queue *n, **pp;
return 0;
}
-int capiminor_del_ack(struct capiminor *mp, __u16 datahandle)
+static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)
{
struct datahandle_queue **pp, *p;
return -1;
}
-void capiminor_del_all_ack(struct capiminor *mp)
+static void capiminor_del_all_ack(struct capiminor *mp)
{
struct datahandle_queue **pp, *p;
/* -------- struct capiminor ---------------------------------------- */
-struct capiminor *capiminor_alloc(__u16 applid, __u32 ncci)
+static struct capiminor *capiminor_alloc(u16 applid, u32 ncci)
{
struct capiminor *mp, **pp;
unsigned int minor = 0;
return mp;
}
-void capiminor_free(struct capiminor *mp)
+static void capiminor_free(struct capiminor *mp)
{
struct capiminor **pp;
}
}
-struct capiminor *capiminor_find(unsigned int minor)
+static struct capiminor *capiminor_find(unsigned int minor)
{
struct capiminor *p;
for (p = minors; p && p->minor != minor; p = p->next)
/* -------- struct capincci ----------------------------------------- */
-static struct capincci *capincci_alloc(struct capidev *cdev, __u32 ncci)
+static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
{
struct capincci *np, **pp;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
return np;
}
-static void capincci_free(struct capidev *cdev, __u32 ncci)
+static void capincci_free(struct capidev *cdev, u32 ncci)
{
struct capincci *np, **pp;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
}
}
-struct capincci *capincci_find(struct capidev *cdev, __u32 ncci)
+static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
{
struct capincci *p;
kmem_cache_free(capidev_cachep, cdev);
}
-static struct capidev *capidev_find(__u16 applid)
+static struct capidev *capidev_find(u16 applid)
{
struct capidev *p;
for (p=capidev_openlist; p; p = p->next) {
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
/* -------- handle data queue --------------------------------------- */
-struct sk_buff *
+static struct sk_buff *
gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
{
struct sk_buff *nskb;
nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_ATOMIC);
if (nskb) {
- __u16 datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4+4+2);
+ u16 datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4+4+2);
unsigned char *s = skb_put(nskb, CAPI_DATA_B3_RESP_LEN);
capimsg_setu16(s, 0, CAPI_DATA_B3_RESP_LEN);
capimsg_setu16(s, 2, mp->applid);
return nskb;
}
-int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
+static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
{
struct sk_buff *nskb;
unsigned int datalen;
- __u16 errcode, datahandle;
+ u16 errcode, datahandle;
datalen = skb->len - CAPIMSG_LEN(skb->data);
if (mp->tty) {
return -1;
}
-void handle_minor_recv(struct capiminor *mp)
+static void handle_minor_recv(struct capiminor *mp)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&mp->inqueue)) != 0) {
}
}
-int handle_minor_send(struct capiminor *mp)
+static int handle_minor_send(struct capiminor *mp)
{
struct sk_buff *skb;
- __u16 len;
+ u16 len;
int count = 0;
- __u16 errcode;
- __u16 datahandle;
+ u16 errcode;
+ u16 datahandle;
if (mp->tty && mp->ttyoutstop) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
while ((skb = skb_dequeue(&mp->outqueue)) != 0) {
datahandle = mp->datahandle;
- len = (__u16)skb->len;
+ len = (u16)skb->len;
skb_push(skb, CAPI_DATA_B3_REQ_LEN);
memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu8 (skb->data, 5, CAPI_REQ);
capimsg_setu16(skb->data, 6, mp->msgid++);
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
- capimsg_setu32(skb->data, 12, (__u32) skb->data); /* Data32 */
+ capimsg_setu32(skb->data, 12, (u32) skb->data); /* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
capimsg_setu16(skb->data, 18, datahandle);
capimsg_setu16(skb->data, 20, 0); /* Flags */
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- function called by lower level -------------------------- */
-static void capi_signal(__u16 applid, void *param)
+static void capi_signal(u16 applid, void *param)
{
struct capidev *cdev = (struct capidev *)param;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor *mp;
- __u16 datahandle;
+ u16 datahandle;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
struct capincci *np;
struct sk_buff *skb = 0;
- __u32 ncci;
+ u32 ncci;
(void) (*capifuncs->capi_get_message) (applid, &skb);
if (!skb) {
struct capidev *cdev = (struct capidev *)file->private_data;
struct sk_buff *skb;
int retval;
- __u16 mlen;
+ u16 mlen;
if (ppos != &file->f_pos)
return -ESPIPE;
sizeof(ncci));
if (retval)
return -EFAULT;
- nccip = capincci_find(cdev, (__u32) ncci);
+ nccip = capincci_find(cdev, (u32) ncci);
if (!nccip)
return 0;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
sizeof(ncci));
if (retval)
return -EFAULT;
- nccip = capincci_find(cdev, (__u32) ncci);
+ nccip = capincci_find(cdev, (u32) ncci);
if (!nccip || (mp = nccip->minorp) == 0)
return -ESRCH;
return mp->minor;
/* -------- tty_operations for capincci ----------------------------- */
-int capinc_tty_open(struct tty_struct * tty, struct file * file)
+static int capinc_tty_open(struct tty_struct * tty, struct file * file)
{
struct capiminor *mp;
return 0;
}
-void capinc_tty_close(struct tty_struct * tty, struct file * file)
+static void capinc_tty_close(struct tty_struct * tty, struct file * file)
{
struct capiminor *mp;
#endif
}
-int capinc_tty_write(struct tty_struct * tty, int from_user,
- const unsigned char *buf, int count)
+static int capinc_tty_write(struct tty_struct * tty, int from_user,
+ const unsigned char *buf, int count)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
struct sk_buff *skb;
return count;
}
-void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
+static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
struct sk_buff *skb;
}
}
-void capinc_tty_flush_chars(struct tty_struct *tty)
+static void capinc_tty_flush_chars(struct tty_struct *tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
struct sk_buff *skb;
(void)handle_minor_recv(mp);
}
-int capinc_tty_write_room(struct tty_struct *tty)
+static int capinc_tty_write_room(struct tty_struct *tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
int room;
return room;
}
-int capinc_tty_chars_in_buffer(struct tty_struct *tty)
+static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
if (!mp || !mp->nccip) {
return mp->outbytes;
}
-int capinc_tty_ioctl(struct tty_struct *tty, struct file * file,
+static int capinc_tty_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
int error = 0;
return error;
}
-void capinc_tty_set_termios(struct tty_struct *tty, struct termios * old)
+static void capinc_tty_set_termios(struct tty_struct *tty, struct termios * old)
{
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_set_termios\n");
#endif
}
-void capinc_tty_throttle(struct tty_struct * tty)
+static void capinc_tty_throttle(struct tty_struct * tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
#ifdef _DEBUG_TTYFUNCS
mp->ttyinstop = 1;
}
-void capinc_tty_unthrottle(struct tty_struct * tty)
+static void capinc_tty_unthrottle(struct tty_struct * tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
#ifdef _DEBUG_TTYFUNCS
}
}
-void capinc_tty_stop(struct tty_struct *tty)
+static void capinc_tty_stop(struct tty_struct *tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
#ifdef _DEBUG_TTYFUNCS
}
}
-void capinc_tty_start(struct tty_struct *tty)
+static void capinc_tty_start(struct tty_struct *tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
#ifdef _DEBUG_TTYFUNCS
}
}
-void capinc_tty_hangup(struct tty_struct *tty)
+static void capinc_tty_hangup(struct tty_struct *tty)
{
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_hangup\n");
#endif
}
-void capinc_tty_break_ctl(struct tty_struct *tty, int state)
+static void capinc_tty_break_ctl(struct tty_struct *tty, int state)
{
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_break_ctl(%d)\n", state);
#endif
}
-void capinc_tty_flush_buffer(struct tty_struct *tty)
+static void capinc_tty_flush_buffer(struct tty_struct *tty)
{
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_flush_buffer\n");
#endif
}
-void capinc_tty_set_ldisc(struct tty_struct *tty)
+static void capinc_tty_set_ldisc(struct tty_struct *tty)
{
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_set_ldisc\n");
#endif
}
-void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
+static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
{
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_send_xchar(%d)\n", ch);
#endif
}
-int capinc_tty_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- return 0;
-}
-
-int capinc_write_proc(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int capinc_tty_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
{
return 0;
}
static struct termios *capinc_tty_termios[CAPINC_NR_PORTS];
static struct termios *capinc_tty_termios_locked[CAPINC_NR_PORTS];
-int capinc_tty_init(void)
+static int capinc_tty_init(void)
{
struct tty_driver *drv = &capinc_tty_driver;
return 0;
}
-void capinc_tty_exit(void)
+static void capinc_tty_exit(void)
{
struct tty_driver *drv = &capinc_tty_driver;
int retval;
return 0;
}
-static void lower_callback(unsigned int cmd, __u32 contr, void *data)
+static void lower_callback(unsigned int cmd, u32 contr, void *data)
{
struct capi_ncciinfo *np;
struct capidev *cdev;
divert_info_head = divert_info_head->next;
kfree(inf);
}
- spin_unlock_irq( &divert_info_lock, flags );
+ spin_unlock_irqrestore( &divert_info_lock, flags );
return (0);
} /* isdn_divert_close */
#ifdef CONFIG_PROC_FS
static struct file_operations isdn_fops =
{
+ owner: THIS_MODULE,
llseek: no_llseek,
read: isdn_divert_read,
write: isdn_divert_write,
while(i--)
{
- DivaDoCardDpc(card++);
+ if (card->state == DIA_RUNNING)
+ DivaDoCardDpc(card);
+ card++;
}
}
};
};
/* all adapter flavors checked without match, finito with: */
- return ENODEV;
+ return -ENODEV;
};
membase = cards_membase;
} else {
if (membase != cards_membase)
- return ENODEV;
+ return -ENODEV;
};
cards_irq=irq_array[((adf_pos0 & 0xC)>>2)];
if (irq == -1) {
irq = cards_irq;
} else {
if (irq != cards_irq)
- return ENODEV;
+ return -ENODEV;
};
cards_io= 0xC00 + ((adf_pos0>>4)*0x10);
type = EICON_CTYPE_ISAPRI;
membase = cards_membase;
} else {
if (membase != cards_membase)
- return ENODEV;
+ return -ENODEV;
};
cards_irq=irq_array[((adf_pos0 & 0xC)>>2)];
if (irq == -1) {
irq = cards_irq;
} else {
if (irq != cards_irq)
- return ENODEV;
+ return -ENODEV;
};
cards_io= 0xC00 + ((adf_pos0>>4)*0x10);
irq = cards_irq;
} else {
if (irq != cards_irq)
- return ENODEV;
+ return -ENODEV;
};
type = 0;
break;
default:
- return ENODEV;
+ return -ENODEV;
};
/* matching membase & irq */
if ( 1 == eicon_addcard(type, membase, irq, id, 0)) {
cards->mca_slot+1);
return 0 ; /* eicon_addcard added a card */
} else {
- return ENODEV;
+ return -ENODEV;
};
};
#endif /* CONFIG_MCA */
dev_kfree_skb_irq(skb);
}
-static void hdlc_irq(struct fritz_bcs *bcs, u32 stat)
+static void hdlc_irq_one(struct fritz_bcs *bcs, u32 stat)
{
DBG(0x10, "ch%d stat %#x", bcs->channel, stat);
if (stat & HDLC_INT_RPR) {
}
}
-static inline void hdlc_interrupt(struct fritz_adapter *adapter)
+static inline void hdlc_irq(struct fritz_adapter *adapter)
{
int nr;
u32 stat;
stat = adapter->read_hdlc_status(adapter, nr);
DBG(0x10, "HDLC %c stat %#x", 'A' + nr, stat);
if (stat & HDLC_INT_MASK)
- hdlc_irq(&adapter->bcs[nr], stat);
+ hdlc_irq_one(&adapter->bcs[nr], stat);
}
}
return;
DBG(2, "STATUS0 %#x", val);
if (val & AVM_STATUS0_IRQ_ISAC)
- isacsx_interrupt(&adapter->isac);
+ isacsx_irq(&adapter->isac);
if (val & AVM_STATUS0_IRQ_HDLC)
- hdlc_interrupt(adapter);
+ hdlc_irq(adapter);
}
static void fcpci_irq(int intno, void *dev, struct pt_regs *regs)
return;
DBG(2, "sval %#x", sval);
if (!(sval & AVM_STATUS0_IRQ_ISAC))
- isac_interrupt(&adapter->isac);
+ isac_irq(&adapter->isac);
if (!(sval & AVM_STATUS0_IRQ_HDLC))
- hdlc_interrupt(adapter);
+ hdlc_irq(adapter);
}
// ----------------------------------------------------------------------
}
}
-void isac_interrupt(struct isac *isac)
+void isac_irq(struct isac *isac)
{
unsigned char val;
}
}
-void isacsx_interrupt(struct isac *isac)
+void isacsx_irq(struct isac *isac)
{
unsigned char val;
EXPORT_SYMBOL(isac_d_l2l1);
EXPORT_SYMBOL(isacsx_setup);
-EXPORT_SYMBOL(isacsx_interrupt);
+EXPORT_SYMBOL(isacsx_irq);
EXPORT_SYMBOL(isac_setup);
-EXPORT_SYMBOL(isac_interrupt);
+EXPORT_SYMBOL(isac_irq);
module_init(hisax_isac_init);
module_exit(hisax_isac_exit);
void isac_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg);
void isac_setup(struct isac *isac);
-void isac_interrupt(struct isac *isac);
+void isac_irq(struct isac *isac);
void isacsx_setup(struct isac *isac);
-void isacsx_interrupt(struct isac *isac);
+void isacsx_irq(struct isac *isac);
#endif
usb_b_out_complete, bcs);
}
-static void __devexit st5481_release_b_out(struct st5481_bcs *bcs)
+static void st5481_release_b_out(struct st5481_bcs *bcs)
{
struct st5481_b_out *b_out = &bcs->b_out;
/*
* Release buffers and URBs for the B channels
*/
-void __devexit st5481_release_b(struct st5481_bcs *bcs)
+void st5481_release_b(struct st5481_bcs *bcs)
{
DBG(4,"");
usb_d_out_complete, adapter);
}
-static void __devexit st5481_release_d_out(struct st5481_adapter *adapter)
+static void st5481_release_d_out(struct st5481_adapter *adapter)
{
struct st5481_d_out *d_out = &adapter->d_out;
return retval;
}
-void __devexit st5481_release_d(struct st5481_adapter *adapter)
+void st5481_release_d(struct st5481_adapter *adapter)
{
DBG(2,"");
* Release buffers and URBs for the interrupt and control
* endpoint.
*/
-void __devexit st5481_release_usb(struct st5481_adapter *adapter)
+void st5481_release_usb(struct st5481_adapter *adapter)
{
struct st5481_intr *intr = &adapter->intr;
struct st5481_ctrl *ctrl = &adapter->ctrl;
return retval;
}
-void __devexit st5481_release_isocpipes(struct urb* urb[2])
+void st5481_release_isocpipes(struct urb* urb[2])
{
int j;
return retval;
}
-void __devexit st5481_release_in(struct st5481_in *in)
+void st5481_release_in(struct st5481_in *in)
{
DBG(2,"");
+++ /dev/null
-/* $Id: hysdn_procfs.c,v 1.1 2000/02/10 19:45:18 werner Exp $
-
- * Linux driver for HYSDN cards, /proc/net filesystem log functions.
- * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
- *
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Log: hysdn_procfs.c,v $
- * Revision 1.1 2000/02/10 19:45:18 werner
- *
- * Initial release
- *
- *
- */
-
-#define __NO_VERSION__
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/pci.h>
-#include <linux/smp_lock.h>
-
-#include "hysdn_defs.h"
-
-static char *hysdn_procfs_revision = "$Revision: 1.1 $";
-
-#define INFO_OUT_LEN 80 /* length of info line including lf */
-
-/*************************************************/
-/* structure keeping ascii log for device output */
-/*************************************************/
-struct log_data {
- struct log_data *next;
- ulong usage_cnt; /* number of files still to work */
- void *proc_ctrl; /* pointer to own control procdata structure */
- char log_start[2]; /* log string start (final len aligned by size) */
-};
-
-/**********************************************/
-/* structure holding proc entrys for one card */
-/**********************************************/
-struct procdata {
- struct proc_dir_entry *log; /* log entry */
- char log_name[15]; /* log filename */
- struct log_data *log_head, *log_tail; /* head and tail for queue */
- int if_used; /* open count for interface */
- wait_queue_head_t rd_queue;
-};
-
-/********************************************/
-/* put an log buffer into the log queue. */
-/* This buffer will be kept until all files */
-/* opened for read got the contents. */
-/* Flushes buffers not longer in use. */
-/********************************************/
-void
-put_log_buffer(hysdn_card * card, char *cp)
-{
- struct log_data *ib;
- struct procdata *pd = card->procfs;
- int flags;
-
- if (!pd)
- return;
- if (!cp)
- return;
- if (!*cp)
- return;
- if (pd->if_used <= 0)
- return; /* no open file for read */
-
- if (!(ib = (struct log_data *) kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
- return; /* no memory */
- strcpy(ib->log_start, cp); /* set output string */
- ib->next = NULL;
- ib->proc_ctrl = pd; /* point to own control structure */
- save_flags(flags);
- cli();
- ib->usage_cnt = pd->if_used;
- if (!pd->log_head)
- pd->log_head = ib; /* new head */
- else
- pd->log_tail->next = ib; /* follows existing messages */
- pd->log_tail = ib; /* new tail */
- restore_flags(flags);
-
- /* delete old entrys */
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else
- break;
- } /* pd->log_head->next */
- wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
-} /* put_log_buffer */
-
-
-/**********************************/
-/* log file operations and tables */
-/**********************************/
-
-/****************************************/
-/* write log file -> set log level bits */
-/****************************************/
-static ssize_t
-hysdn_log_write(struct file *file, const char *buf, size_t count, loff_t * off)
-{
- int retval;
- hysdn_card *card = (hysdn_card *) file->private_data;
-
- if (&file->f_pos != off) /* fs error check */
- return (-ESPIPE);
-
- if ((retval = pof_boot_write(card, buf, count)) < 0)
- retval = -EFAULT; /* an error occurred */
-
- return (retval);
-} /* hysdn_log_write */
-
-/******************/
-/* read log file */
-/******************/
-static ssize_t
-hysdn_log_read(struct file *file, char *buf, size_t count, loff_t * off)
-{
- struct log_data *inf;
- int len;
- word ino;
- struct procdata *pd;
- hysdn_card *card;
-
- if (!*((struct log_data **) file->private_data)) {
- if (file->f_flags & O_NONBLOCK)
- return (-EAGAIN);
-
- /* sorry, but we need to search the card */
- ino = file->f_dentry->d_inode->i_ino & 0xFFFF; /* low-ino */
- card = card_root;
- while (card) {
- pd = card->procfs;
- if (pd->log->low_ino == ino)
- break;
- card = card->next; /* search next entry */
- }
- if (card)
- interruptible_sleep_on(&(pd->rd_queue));
- else
- return (-EAGAIN);
-
- }
- if (!(inf = *((struct log_data **) file->private_data)))
- return (0);
-
- inf->usage_cnt--; /* new usage count */
- (struct log_data **) file->private_data = &inf->next; /* next structure */
- if ((len = strlen(inf->log_start)) <= count) {
- if (copy_to_user(buf, inf->log_start, len))
- return -EFAULT;
- file->f_pos += len;
- return (len);
- }
- return (0);
-} /* hysdn_log_read */
-
-/******************/
-/* open log file */
-/******************/
-static int
-hysdn_log_open(struct inode *ino, struct file *filep)
-{
- hysdn_card *card;
- struct procdata *pd;
- ulong flags;
-
- lock_kernel();
- card = card_root;
- while (card) {
- pd = card->procfs;
- if (pd->log->low_ino == (ino->i_ino & 0xFFFF))
- break;
- card = card->next; /* search next entry */
- }
- if (!card) {
- unlock_kernel();
- return (-ENODEV); /* device is unknown/invalid */
- }
- filep->private_data = card; /* remember our own card */
-
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> boot pof data */
- if (pof_boot_open(card)) {
- unlock_kernel();
- return (-EPERM); /* no permission this time */
- }
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
-
- /* read access -> log/debug read */
- save_flags(flags);
- cli();
- pd->if_used++;
- if (pd->log_head)
- (struct log_data **) filep->private_data = &(pd->log_tail->next);
- else
- (struct log_data **) filep->private_data = &(pd->log_head);
- restore_flags(flags);
-
- } else { /* simultaneous read/write access forbidden ! */
- unlock_kernel();
- return (-EPERM); /* no permission this time */
- }
- unlock_kernel();
- return (0);
-} /* hysdn_log_open */
-
-/*******************************************************************************/
-/* close a cardlog file. If the file has been opened for exclusive write it is */
-/* assumed as pof data input and the pof loader is noticed about. */
-/* Otherwise file is handled as log output. In this case the interface usage */
-/* count is decremented and all buffers are noticed of closing. If this file */
-/* was the last one to be closed, all buffers are freed. */
-/*******************************************************************************/
-static int
-hysdn_log_close(struct inode *ino, struct file *filep)
-{
- struct log_data *inf;
- struct procdata *pd;
- hysdn_card *card;
- int flags, retval = 0;
-
- lock_kernel();
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write debug completely written */
- retval = 0; /* success */
- } else {
- /* read access -> log/debug read, mark one further file as closed */
-
- pd = NULL;
- save_flags(flags);
- cli();
- inf = *((struct log_data **) filep->private_data); /* get first log entry */
- if (inf)
- pd = (struct procdata *) inf->proc_ctrl; /* still entries there */
- else {
- /* no info available -> search card */
- card = card_root;
- while (card) {
- pd = card->procfs;
- if (pd->log->low_ino == (ino->i_ino & 0xFFFF))
- break;
- card = card->next; /* search next entry */
- }
- if (card)
- pd = card->procfs; /* pointer to procfs ctrl */
- }
- if (pd)
- pd->if_used--; /* decrement interface usage count by one */
-
- while (inf) {
- inf->usage_cnt--; /* decrement usage count for buffers */
- inf = inf->next;
- }
- restore_flags(flags);
-
- if (pd)
- if (pd->if_used <= 0) /* delete buffers if last file closed */
- while (pd->log_head) {
- inf = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(inf);
- }
- } /* read access */
-
- unlock_kernel();
- return (retval);
-} /* hysdn_log_close */
-
-/*************************************************/
-/* select/poll routine to be able using select() */
-/*************************************************/
-static unsigned int
-hysdn_log_poll(struct file *file, poll_table * wait)
-{
- unsigned int mask = 0;
- word ino;
- hysdn_card *card;
- struct procdata *pd;
-
- if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
- return (mask); /* no polling for write supported */
-
- /* we need to search the card */
- ino = file->f_dentry->d_inode->i_ino & 0xFFFF; /* low-ino */
- card = card_root;
- while (card) {
- pd = card->procfs;
- if (pd->log->low_ino == ino)
- break;
- card = card->next; /* search next entry */
- }
- if (!card)
- return (mask); /* card not found */
-
- poll_wait(file, &(pd->rd_queue), wait);
-
- if (*((struct log_data **) file->private_data))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-} /* hysdn_log_poll */
-
-/**************************************************/
-/* table for log filesystem functions defined above. */
-/**************************************************/
-static struct file_operations log_fops =
-{
- llseek: no_llseek,
- read: hysdn_log_read,
- write: hysdn_log_write,
- poll: hysdn_log_poll,
- open: hysdn_log_open,
- release: hysdn_log_close,
-};
-
-/*****************************************/
-/* Output info data to the cardinfo file */
-/*****************************************/
-static int
-info_read(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
-{
- char tmp[INFO_OUT_LEN * 11 + 2];
- int i;
- char *cp;
- hysdn_card *card;
-
- sprintf(tmp, "id bus slot type irq iobase plx-mem dp-mem boot device");
- cp = tmp; /* start of string */
- while (*cp)
- cp++;
- while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
- *cp++ = ' ';
- *cp++ = '\n';
-
- card = card_root; /* start of list */
- while (card) {
- sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08x 0x%08x",
- card->myid,
- card->bus,
- PCI_SLOT(card->devfn),
- card->brdtype,
- card->irq,
- card->iobase,
- card->plxbase,
- card->membase);
- card = card->next;
- while (*cp)
- cp++;
- while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
- *cp++ = ' ';
- *cp++ = '\n';
- }
-
- i = cp - tmp;
- *start = buffer;
- if (offset + length > i) {
- length = i - offset;
- *eof = 1;
- } else if (offset > i) {
- length = 0;
- *eof = 1;
- }
- cp = tmp + offset;
-
- if (length > 0) {
- /* start_bh_atomic(); */
- memcpy(buffer, cp, length);
- /* end_bh_atomic(); */
- return length;
- }
- return 0;
-} /* info_read */
-
-/*****************************/
-/* hysdn subdir in /proc/net */
-/*****************************/
-static struct proc_dir_entry *hysdn_proc_entry = NULL;
-static struct proc_dir_entry *hysdn_info_entry = NULL;
-
-/***************************************************************************************/
-/* hysdn_procfs_init is called when the module is loaded and after the cards have been */
-/* detected. The needed proc dir and card entries are created. */
-/***************************************************************************************/
-int
-hysdn_procfs_init(void)
-{
- struct procdata *pd;
- hysdn_card *card;
-
- hysdn_proc_entry = create_proc_entry(PROC_SUBDIR_NAME, S_IFDIR | S_IRUGO | S_IXUGO, proc_net);
- if (!hysdn_proc_entry) {
- printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
- return (-1);
- }
- hysdn_info_entry = create_proc_entry("cardinfo", 0, hysdn_proc_entry);
- if (hysdn_info_entry)
- hysdn_info_entry->read_proc = info_read; /* read info function */
-
- /* create all cardlog proc entries */
-
- card = card_root; /* start with first card */
- while (card) {
- if ((pd = (struct procdata *) kmalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
- memset(pd, 0, sizeof(struct procdata));
-
- sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
- if ((pd->log = create_proc_entry(pd->log_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry)) != NULL) {
- pd->log->proc_fops = &log_fops; /* set new operations table */
- pd->log->owner = THIS_MODULE;
- }
-
- init_waitqueue_head(&(pd->rd_queue));
-
- card->procfs = (void *) pd; /* remember procfs structure */
- }
- card = card->next; /* point to next card */
- }
-
- printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procfs_revision));
- return (0);
-} /* hysdn_procfs_init */
-
-/***************************************************************************************/
-/* hysdn_procfs_release is called when the module is unloaded and before the cards */
-/* resources are released. The module counter is assumed to be 0 ! */
-/***************************************************************************************/
-void
-hysdn_procfs_release(void)
-{
- struct procdata *pd;
- hysdn_card *card;
-
- card = card_root; /* start with first card */
- while (card) {
- if ((pd = (struct procdata *) card->procfs) != NULL) {
- if (pd->log)
- remove_proc_entry(pd->log_name, hysdn_proc_entry);
- kfree(pd); /* release memory */
- }
- card = card->next; /* point to next card */
- }
-
- remove_proc_entry("cardinfo", hysdn_proc_entry);
- remove_proc_entry(PROC_SUBDIR_NAME, proc_net);
-} /* hysdn_procfs_release */
unsigned int nr;
unsigned long irq; /* IRQ used by SAA7146 card */
unsigned short id;
- struct i2c_bus i2c;
struct pci_dev *dev;
unsigned char revision;
unsigned char boardcfg[64]; /* 64 bytes of config from eeprom */
#include <asm/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/videodev.h>
-#include <linux/i2c-old.h>
#include "saa7146.h"
#include "saa7146reg.h"
!(saaread(SAA7146_MC2) & SAA7146_MC2_UPLD_I2C); i++)
schedule();
}
+
/* read I2C */
-static int I2CRead(struct i2c_bus *bus, unsigned char addr,
+static int I2CRead(struct saa7146 *saa, unsigned char addr,
unsigned char subaddr, int dosub)
{
- struct saa7146 *saa = (struct saa7146 *) bus->data;
int i;
-
if (saaread(SAA7146_I2C_STATUS) & 0x3c)
I2CWipe(saa);
for (i = 0; i < 1000 &&
printk("i2c read timeout\n");
return ((saaread(SAA7146_I2C_TRANSFER) >> 24) & 0xff);
}
-static int I2CReadOld(struct i2c_bus *bus, unsigned char addr)
-{
- return I2CRead(bus, addr, 0, 0);
-}
/* set both to write both bytes, reset it to write only b1 */
-static int I2CWrite(struct i2c_bus *bus, unsigned char addr, unsigned char b1,
+static int I2CWrite(struct saa7146 *saa, unsigned char addr, unsigned char b1,
unsigned char b2, int both)
{
- struct saa7146 *saa = (struct saa7146 *) bus->data;
int i;
u32 data;
return 0;
}
-static void attach_inform(struct i2c_bus *bus, int id)
+static void attach_inform(struct saa7146 *saa, int id)
{
- struct saa7146 *saa = (struct saa7146 *) bus->data;
int i;
DEBUG(printk(KERN_DEBUG "stradis%d: i2c: device found=%02x\n", saa->nr, id));
if (id == 0xa0) { /* we have rev2 or later board, fill in info */
for (i = 0; i < 64; i++)
- saa->boardcfg[i] = I2CRead(bus, 0xa0, i, 1);
+ saa->boardcfg[i] = I2CRead(saa, 0xa0, i, 1);
#ifdef USE_RESCUE_EEPROM_SDM275
if (saa->boardcfg[0] != 0) {
printk("stradis%d: WARNING: EEPROM STORED VALUES HAVE BEEN IGNORED\n", saa->nr);
}
}
-static void detach_inform(struct i2c_bus *bus, int id)
+static void detach_inform(struct saa7146 *saa, int id)
{
- struct saa7146 *saa = (struct saa7146 *) bus->data;
int i;
i = saa->nr;
}
-static void I2CBusScan(struct i2c_bus *bus)
+static void I2CBusScan(struct saa7146 *saa)
{
int i;
for (i = 0; i < 0xff; i += 2)
- if ((I2CRead(bus, i, 0, 0)) >= 0)
- attach_inform(bus, i);
+ if ((I2CRead(saa, i, 0, 0)) >= 0)
+ attach_inform(saa, i);
}
-static struct i2c_bus saa7146_i2c_bus_template =
-{
- "saa7146",
- I2C_BUSID_BT848,
- NULL,
- SPIN_LOCK_UNLOCKED,
- attach_inform,
- detach_inform,
- NULL,
- NULL,
- I2CReadOld,
- I2CWrite,
-};
-
static int debiwait_maxwait = 0;
static int wait_for_debi_done(struct saa7146 *saa)
static void cs4341_setlevel(struct saa7146 *saa, int left, int right)
{
- I2CWrite(&(saa->i2c), 0x22, 0x03,
- left > 94 ? 94 : left, 2);
- I2CWrite(&(saa->i2c), 0x22, 0x04,
- right > 94 ? 94 : right, 2);
+ I2CWrite(saa, 0x22, 0x03, left > 94 ? 94 : left, 2);
+ I2CWrite(saa, 0x22, 0x04, right > 94 ? 94 : right, 2);
}
static void initialize_cs4341(struct saa7146 *saa)
for (i = 0; i < 200; i++) {
/* auto mute off, power on, no de-emphasis */
/* I2S data up to 24-bit 64xFs internal SCLK */
- I2CWrite(&(saa->i2c), 0x22, 0x01, 0x11, 2);
+ I2CWrite(saa, 0x22, 0x01, 0x11, 2);
/* ATAPI mixer settings */
- I2CWrite(&(saa->i2c), 0x22, 0x02, 0x49, 2);
+ I2CWrite(saa, 0x22, 0x02, 0x49, 2);
/* attenuation left 3db */
- I2CWrite(&(saa->i2c), 0x22, 0x03, 0x00, 2);
+ I2CWrite(saa, 0x22, 0x03, 0x00, 2);
/* attenuation right 3db */
- I2CWrite(&(saa->i2c), 0x22, 0x04, 0x00, 2);
- I2CWrite(&(saa->i2c), 0x22, 0x01, 0x10, 2);
- if (I2CRead(&(saa->i2c), 0x22, 0x02, 1) == 0x49)
+ I2CWrite(saa, 0x22, 0x04, 0x00, 2);
+ I2CWrite(saa, 0x22, 0x01, 0x10, 2);
+ if (I2CRead(saa, 0x22, 0x02, 1) == 0x49)
break;
schedule();
}
else
sequence = mode8420con;
for (i = 0; i < INIT8420LEN; i++)
- I2CWrite(&(saa->i2c), 0x20, init8420[i * 2],
+ I2CWrite(saa, 0x20, init8420[i * 2],
init8420[i * 2 + 1], 2);
for (i = 0; i < MODE8420LEN; i++)
- I2CWrite(&(saa->i2c), 0x20, sequence[i * 2],
+ I2CWrite(saa, 0x20, sequence[i * 2],
sequence[i * 2 + 1], 2);
printk("stradis%d: CS8420 initialized\n", saa->nr);
}
for (i = 0; i < INIT7121LEN; i++) {
if (NewCard) { /* handle new card encoder differences */
if (sequence[i*2] == 0x3a)
- I2CWrite(&(saa->i2c), 0x88, 0x3a, 0x13, 2);
+ I2CWrite(saa, 0x88, 0x3a, 0x13, 2);
else if (sequence[i*2] == 0x6b)
- I2CWrite(&(saa->i2c), 0x88, 0x6b, 0x20, 2);
+ I2CWrite(saa, 0x88, 0x6b, 0x20, 2);
else if (sequence[i*2] == 0x6c)
- I2CWrite(&(saa->i2c), 0x88, 0x6c,
+ I2CWrite(saa, 0x88, 0x6c,
dopal ? 0x09 : 0xf5, 2);
else if (sequence[i*2] == 0x6d)
- I2CWrite(&(saa->i2c), 0x88, 0x6d,
+ I2CWrite(saa, 0x88, 0x6d,
dopal ? 0x20 : 0x00, 2);
else if (sequence[i*2] == 0x7a)
- I2CWrite(&(saa->i2c), 0x88, 0x7a,
+ I2CWrite(saa, 0x88, 0x7a,
dopal ? (PALFirstActive - 1) :
(NTSCFirstActive - 4), 2);
else if (sequence[i*2] == 0x7b)
- I2CWrite(&(saa->i2c), 0x88, 0x7b,
+ I2CWrite(saa, 0x88, 0x7b,
dopal ? PALLastActive :
NTSCLastActive, 2);
- else I2CWrite(&(saa->i2c), 0x88, sequence[i * 2],
+ else I2CWrite(saa, 0x88, sequence[i * 2],
sequence[i * 2 + 1], 2);
} else {
if (sequence[i*2] == 0x6b && mod)
- I2CWrite(&(saa->i2c), 0x88, 0x6b,
+ I2CWrite(saa, 0x88, 0x6b,
(sequence[i * 2 + 1] ^ 0x09), 2);
else if (sequence[i*2] == 0x7a)
- I2CWrite(&(saa->i2c), 0x88, 0x7a,
+ I2CWrite(saa, 0x88, 0x7a,
dopal ? (PALFirstActive - 1) :
(NTSCFirstActive - 4), 2);
else if (sequence[i*2] == 0x7b)
- I2CWrite(&(saa->i2c), 0x88, 0x7b,
+ I2CWrite(saa, 0x88, 0x7b,
dopal ? PALLastActive :
NTSCLastActive, 2);
else
- I2CWrite(&(saa->i2c), 0x88, sequence[i * 2],
+ I2CWrite(saa, 0x88, sequence[i * 2],
sequence[i * 2 + 1], 2);
}
}
if (!saa->saa7146_mem)
return -EIO;
- memcpy(&(saa->i2c), &saa7146_i2c_bus_template, sizeof(struct i2c_bus));
memcpy(&saa->video_dev, &saa_template, sizeof(saa_template));
- sprintf(saa->i2c.name, "stradis%d", num);
- saa->i2c.data = saa;
saawrite(0, SAA7146_IER); /* turn off all interrupts */
result = request_irq(saa->irq, saa7146_irq,
SA_SHIRQ | SA_INTERRUPT, "stradis", (void *) saa);
iounmap(saa->saa7146_mem);
return -1;
}
-#if 0
- /* i2c generic interface is currently BROKEN */
- i2c_register_bus(&saa->i2c);
-#endif
return 0;
}
saawrite(4, SAA7146_PAGE2); /* dma direction: read, no byteswap */
saawrite(((SAA7146_MC2_UPLD_DMA2) << 16) | SAA7146_MC2_UPLD_DMA2,
SAA7146_MC2);
- I2CBusScan(&(saa->i2c));
+ I2CBusScan(saa);
return 0;
}
saawrite(0, SAA7146_MC2);
saawrite(0, SAA7146_IER);
saawrite(0xffffffffUL, SAA7146_ISR);
-#if 0
- /* unregister i2c_bus */
- i2c_unregister_bus((&saa->i2c));
-#endif
/* disable PCI bus-mastering */
pci_read_config_byte(saa->dev, PCI_COMMAND, &command);
u8 *scsi_buf;
unsigned long flags;
- if (rq->cmd != IDESCSI_PC_RQ) {
+ if (!(rq->flags & REQ_SPECIAL)) {
ide_end_request (uptodate, hwgroup);
return;
}
printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %ld\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors);
#endif /* IDESCSI_DEBUG_LOG */
- if (rq->cmd == IDESCSI_PC_RQ) {
+ if (rq->flags & REQ_SPECIAL) {
return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->buffer);
}
- printk (KERN_ERR "ide-scsi: %s: unsupported command in request queue (%x)\n", drive->name, rq->cmd);
+ blk_dump_rq_flags(rq, "ide-scsi: unsup command");
idescsi_end_request (0,HWGROUP (drive));
return ide_stopped;
}
ide_init_drive_cmd (rq);
rq->buffer = (char *) pc;
rq->bio = idescsi_dma_bio (drive, pc);
- rq->cmd = IDESCSI_PC_RQ;
+ rq->flags = REQ_SPECIAL;
spin_unlock(&cmd->host->host_lock);
(void) ide_do_drive_cmd (drive, rq, ide_end);
spin_lock_irq(&cmd->host->host_lock);
ASSERT_LOCK(&q->queue_lock, 0);
- rq->cmd = SPECIAL;
+ rq->flags = REQ_SPECIAL | REQ_NOMERGE | REQ_BARRIER;
rq->special = data;
rq->q = NULL;
rq->bio = rq->biotail = NULL;
rq->nr_segments = 0;
rq->elevator_sequence = 0;
- rq->inactive = 0;
/*
* We have the option of inserting the head or the tail of the queue.
* the bad sector.
*/
SCpnt->request.special = (void *) SCpnt;
+#if 0
+ SCpnt->request.flags |= REQ_SPECIAL;
+#endif
list_add(&SCpnt->request.queuelist, &q->queue_head);
}
if (bbpnt) {
for (i = 0; i < SCpnt->use_sg; i++) {
if (bbpnt[i]) {
- if (SCpnt->request.cmd == READ) {
+ if (rq_data_dir(req) == READ) {
memcpy(bbpnt[i],
sgpnt[i].address,
sgpnt[i].length);
scsi_free(SCpnt->buffer, SCpnt->sglist_len);
} else {
if (SCpnt->buffer != req->buffer) {
- if (req->cmd == READ) {
+ if (rq_data_dir(req) == READ) {
unsigned long flags;
char *to = bio_kmap_irq(req->bio, &flags);
break;
/*
- * get next queueable request. cur_rq would be set if we
- * previously had to abort for some reason
+ * get next queueable request.
*/
req = elv_next_request(q);
* these two cases differently. We differentiate by looking
* at request.cmd, as this tells us the real story.
*/
- if (req->cmd == SPECIAL) {
+ if (req->flags & REQ_SPECIAL) {
STpnt = NULL;
SCpnt = (Scsi_Cmnd *) req->special;
SRpnt = (Scsi_Request *) req->special;
scsi_init_cmd_from_req(SCpnt, SRpnt);
}
- } else {
+ } else if (req->flags & REQ_CMD) {
SRpnt = NULL;
STpnt = scsi_get_request_dev(req);
if (!STpnt) {
/*
* Now try and find a command block that we can use.
*/
- if( req->special != NULL ) {
+ if (req->special) {
SCpnt = (Scsi_Cmnd *) req->special;
/*
* We need to recount the number of
*/
if (!SCpnt)
break;
+ } else {
+ blk_dump_rq_flags(req, "SCSI bad req");
+ break;
}
/*
req = NULL;
spin_unlock_irq(&q->queue_lock);
- if (SCpnt->request.cmd != SPECIAL) {
+ if (SCpnt->request.flags & REQ_CMD) {
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
* some kinds of consistency checking may cause the
* request to be rejected immediately.
*/
- if (STpnt == NULL) {
- STpnt = scsi_get_request_dev(req);
- }
+ if (STpnt == NULL)
+ STpnt = scsi_get_request_dev(&SCpnt->request);
+
/*
* This sets up the scatter-gather table (allocating if
* required). Hosts that need bounce buffers will also
static inline int scsi_new_segment(request_queue_t * q,
struct request * req,
- struct Scsi_Host *SHpnt)
+ struct bio *bio)
{
/*
* pci_map_sg won't be able to map these two
*/
if (req->nr_hw_segments >= q->max_segments)
return 0;
- else if (req->nr_segments >= q->max_segments)
+ else if (req->nr_segments + bio->bi_vcnt > q->max_segments)
return 0;
- req->nr_hw_segments++;
- req->nr_segments++;
+ req->nr_hw_segments += bio->bi_vcnt;
+ req->nr_segments += bio->bi_vcnt;
return 1;
}
static inline int scsi_new_segment(request_queue_t * q,
struct request * req,
- struct Scsi_Host *SHpnt)
+ struct bio *bio)
{
- if (req->nr_segments >= q->max_segments)
+ if (req->nr_segments + bio->bi_vcnt > q->max_segments)
return 0;
/*
* This will form the start of a new segment. Bump the
* counter.
*/
- req->nr_segments++;
+ req->nr_segments += bio->bi_vcnt;
return 1;
}
#endif
struct bio *bio,
int dma_host)
{
- Scsi_Device *SDpnt = q->queuedata;
-
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors)
return 0;
else if (!BIO_SEG_BOUNDARY(q, req->biotail, bio))
#ifdef DMA_CHUNK_SIZE
if (MERGEABLE_BUFFERS(req->biotail, bio))
- return scsi_new_mergeable(q, req, SDpnt->host);
+ return scsi_new_mergeable(q, req, q->queuedata);
#endif
- return scsi_new_segment(q, req, SDpnt->host);
+ return scsi_new_segment(q, req, bio);
}
__inline static int __scsi_front_merge_fn(request_queue_t * q,
struct bio *bio,
int dma_host)
{
- Scsi_Device *SDpnt = q->queuedata;
-
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors)
return 0;
else if (!BIO_SEG_BOUNDARY(q, bio, req->bio))
#ifdef DMA_CHUNK_SIZE
if (MERGEABLE_BUFFERS(bio, req->bio))
- return scsi_new_mergeable(q, req, SDpnt->host);
+ return scsi_new_mergeable(q, req, q->queuedata);
#endif
- return scsi_new_segment(q, req, SDpnt->host);
+ return scsi_new_segment(q, req, bio);
}
/*
}
break;
}
- if (req->cmd == WRITE) {
+ if (rq_data_dir(req) == WRITE)
memcpy(sgpnt[i].address, bbpnt[i],
sgpnt[i].length);
- }
}
}
return 1;
return 0;
}
}
- if (req->cmd == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
unsigned long flags;
char *buf = bio_kmap_irq(bio, &flags);
memcpy(buff, buf, this_count << 9);
this_count = this_count >> 3;
}
}
- switch (SCpnt->request.cmd) {
- case WRITE:
+ if (rq_data_dir(&SCpnt->request) == WRITE) {
if (!dpnt->device->writeable) {
return 0;
}
SCpnt->cmnd[0] = WRITE_6;
SCpnt->sc_data_direction = SCSI_DATA_WRITE;
- break;
- case READ:
+ } else if (rq_data_dir(&SCpnt->request) == READ) {
SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = SCSI_DATA_READ;
- break;
- default:
- panic("Unknown sd command %d\n", SCpnt->request.cmd);
- }
+ } else
+ panic("Unknown sd command %lx\n", SCpnt->request.flags);
SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
nbuff,
return 0;
}
- if ((SCpnt->request.cmd == WRITE) && !scsi_CDs[dev].device->writeable)
+ if (!(SCpnt->request.flags & REQ_CMD)) {
+ blk_dump_rq_flags(&SCpnt->request, "sr unsup command");
+ return 0;
+ }
+
+ if (rq_data_dir(&SCpnt->request) == WRITE && !scsi_CDs[dev].device->writeable)
return 0;
/*
return 0;
}
- block = SCpnt->request.sector / (s_size >> 9);
+ if (rq_data_dir(&SCpnt->request) == WRITE) {
+ if (!scsi_CDs[dev].device->writeable)
+ return 0;
+ SCpnt->cmnd[0] = WRITE_10;
+ SCpnt->sc_data_direction = SCSI_DATA_WRITE;
+ } else if (rq_data_dir(&SCpnt->request) == READ) {
+ SCpnt->cmnd[0] = READ_10;
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
+ } else {
+ blk_dump_rq_flags(&SCpnt->request, "Unknown sr command");
+ return 0;
+ }
/*
* request doesn't start on hw block boundary, add scatter pads
this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9);
- switch (SCpnt->request.cmd) {
- case WRITE:
- SCpnt->cmnd[0] = WRITE_10;
- SCpnt->sc_data_direction = SCSI_DATA_WRITE;
- break;
- case READ:
- SCpnt->cmnd[0] = READ_10;
- SCpnt->sc_data_direction = SCSI_DATA_READ;
- break;
- default:
- printk("Unknown sr command %d\n", SCpnt->request.cmd);
- return 0;
- }
SCSI_LOG_HLQUEUE(2, printk("sr%d : %s %d/%ld 512 byte blocks.\n",
devm,
SCpnt->cmnd[1] = (SCpnt->device->scsi_level <= SCSI_2) ?
((SCpnt->lun << 5) & 0xe0) : 0;
+ block = SCpnt->request.sector / (s_size >> 9);
+
if (this_count > 0xffff)
this_count = 0xffff;
struct bio *bio;
if ((bio = bio_pool)) {
- BUG_ON(bio_pool_free <= 0);
+ BIO_BUG_ON(bio_pool_free <= 0);
bio_pool = bio->bi_next;
bio->bi_next = NULL;
bio_pool_free--;
spin_lock_irqsave(&bio_lock, flags);
bio = __bio_pool_get();
- BUG_ON(!bio && bio_pool_free);
+ BIO_BUG_ON(!bio && bio_pool_free);
spin_unlock_irqrestore(&bio_lock, flags);
return bio;
}
}
-#define BIO_CAN_WAIT(gfp_mask) \
- (((gfp_mask) & (__GFP_WAIT | __GFP_IO)) == (__GFP_WAIT | __GFP_IO))
+#define BIO_CAN_WAIT(gfp_mask) ((gfp_mask) & __GFP_WAIT)
static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, int *idx)
{
{
struct biovec_pool *bp = &bvec_list[bio->bi_max];
- BUG_ON(bio->bi_max >= BIOVEC_NR_POOLS);
+ BIO_BUG_ON(bio->bi_max >= BIOVEC_NR_POOLS);
/*
* cloned bio doesn't own the veclist
*/
- if (!(bio->bi_flags & (1 << BIO_CLONED)))
+ if (!(bio->bi_flags & (1 << BIO_CLONED))) {
kmem_cache_free(bp->bp_cachep, bio->bi_io_vec);
+ wake_up_nr(&bp->bp_wait, 1);
+ }
bio_pool_put(bio);
}
inline void bio_init(struct bio *bio)
{
bio->bi_next = NULL;
- atomic_set(&bio->bi_cnt, 1);
bio->bi_flags = 0;
bio->bi_rw = 0;
bio->bi_vcnt = 0;
bio->bi_idx = 0;
bio->bi_size = 0;
bio->bi_end_io = NULL;
+ atomic_set(&bio->bi_cnt, 1);
}
static inline struct bio *__bio_alloc(int gfp_mask, bio_destructor_t *dest)
*/
static inline void bio_free(struct bio *bio)
{
+ bio->bi_next = NULL;
bio->bi_destructor(bio);
}
**/
void bio_put(struct bio *bio)
{
- BUG_ON(!atomic_read(&bio->bi_cnt));
+ BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
/*
* last put frees it
*/
- if (atomic_dec_and_test(&bio->bi_cnt)) {
- BUG_ON(bio->bi_next);
-
+ if (atomic_dec_and_test(&bio->bi_cnt))
bio_free(bio);
- }
}
/**
static int bio_end_io_kio(struct bio *bio, int nr_sectors)
{
struct kiobuf *kio = (struct kiobuf *) bio->bi_private;
- int uptodate, done;
-
- done = 0;
- uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- do {
- int sectors = bio->bi_io_vec[bio->bi_idx].bv_len >> 9;
-
- nr_sectors -= sectors;
-
- bio->bi_idx++;
-
- done = !end_kio_request(kio, uptodate);
-
- if (bio->bi_idx == bio->bi_vcnt)
- done = 1;
- } while (!done && nr_sectors > 0);
-
- /*
- * all done
- */
- if (done) {
- bio_put(bio);
- return 0;
- }
-
- return 1;
+ end_kio_request(kio, test_bit(BIO_UPTODATE, &bio->bi_flags));
+ bio_put(bio);
+ return 0;
}
/*
max_bytes = get_max_sectors(dev) << 9;
max_segments = get_max_segments(dev);
if ((max_bytes >> PAGE_SHIFT) < (max_segments + 1))
- max_segments = (max_bytes >> PAGE_SHIFT) + 1;
+ max_segments = (max_bytes >> PAGE_SHIFT);
if (max_segments > BIO_MAX_PAGES)
max_segments = BIO_MAX_PAGES;
offset = kio->offset & ~PAGE_MASK;
size = kio->length;
- /*
- * set I/O count to number of pages for now
- */
- atomic_set(&kio->io_count, total_nr_pages);
+ atomic_set(&kio->io_count, 1);
map_i = 0;
next_chunk:
+ atomic_inc(&kio->io_count);
if ((nr_pages = total_nr_pages) > max_segments)
nr_pages = max_segments;
out:
if (err)
kio->errno = err;
+
+ end_kio_request(kio, !err);
}
int bio_endio(struct bio *bio, int uptodate, int nr_sectors)
void coda_cache_enter(struct inode *inode, int mask)
{
struct coda_inode_info *cii = ITOC(inode);
- ENTRY;
if ( !coda_cred_ok(&cii->c_cached_cred) ) {
coda_load_creds(&cii->c_cached_cred);
void coda_cache_clear_inode(struct inode *inode)
{
struct coda_inode_info *cii = ITOC(inode);
- ENTRY;
cii->c_cached_perm = 0;
}
struct coda_inode_info *cii;
struct list_head *tmp;
- ENTRY;
sbi = coda_sbp(sb);
if (!sbi) BUG();
{
struct dentry *alias_de;
- ENTRY;
if ( !inode || !S_ISDIR(inode->i_mode))
return;
struct coda_vattr attr;
int error;
- ENTRY;
-
/* We get inode numbers from Venus -- see venus source */
error = venus_getattr(sb, fid, &attr);
if ( error ) {
"coda_cnode_make: coda_getvattr returned %d for %s.\n",
error, coda_f2s(fid));
*inode = NULL;
- EXIT;
return error;
}
*inode = coda_iget(sb, fid, &attr);
if ( IS_ERR(*inode) ) {
printk("coda_cnode_make: coda_iget failed\n");
- EXIT;
return PTR_ERR(*inode);
}
CDEBUG(D_DOWNCALL, "Done making inode: ino %ld, count %d with %s\n",
(*inode)->i_ino, atomic_read(&(*inode)->i_count),
coda_f2s(&ITOC(*inode)->c_fid));
- EXIT;
return 0;
}
ino_t nr;
struct inode *inode;
struct coda_inode_info *cii;
- ENTRY;
if ( !sb ) {
printk("coda_fid_to_inode: no sb!\n");
const char *name = entry->d_name.name;
size_t length = entry->d_name.len;
- ENTRY;
-
if ( length > CODA_MAXNAMLEN ) {
printk("name too long: lookup, %s (%*s)\n",
coda_i2s(dir), (int)length, name);
d_drop(entry);
coda_flag_inode(res_inode, C_VATTR);
}
- EXIT;
return NULL;
}
{
int error;
- ENTRY;
coda_vfs_stat.permission++;
if ( mask == 0 )
struct ViceFid newfid;
struct coda_vattr attrs;
- ENTRY;
coda_vfs_stat.create++;
CDEBUG(D_INODE, "name: %s, length %d, mode %o\n", name, length, mode);
int error;
struct ViceFid newfid;
- ENTRY;
coda_vfs_stat.mkdir++;
if (coda_isroot(dir) && coda_iscontrol(name, len))
int len = de->d_name.len;
int error;
- ENTRY;
coda_vfs_stat.link++;
if (coda_isroot(dir_inode) && coda_iscontrol(name, len))
out:
CDEBUG(D_INODE, "link result %d\n",error);
- EXIT;
return(error);
}
int symlen;
int error=0;
- ENTRY;
coda_vfs_stat.symlink++;
if (coda_isroot(dir_inode) && coda_iscontrol(name, len))
coda_dir_changed(dir_inode, 0);
CDEBUG(D_INODE, "in symlink result %d\n",error);
- EXIT;
return error;
}
const char *name = de->d_name.name;
int len = de->d_name.len;
- ENTRY;
coda_vfs_stat.unlink++;
CDEBUG(D_INODE, " %s in %s, dirino %ld\n", name ,
int len = de->d_name.len;
int error;
- ENTRY;
coda_vfs_stat.rmdir++;
if (!d_unhashed(de))
int link_adjust = 0;
int error;
- ENTRY;
coda_vfs_stat.rename++;
CDEBUG(D_INODE, "old: %s, (%d length), new: %s"
CDEBUG(D_INODE, "result %d\n", error);
- EXIT;
return error;
}
struct file *cfile, fakefile;
struct coda_inode_info *cii = ITOC(inode);
- ENTRY;
coda_vfs_stat.readdir++;
cfile = cii->c_container;
result = vfs_readdir(file, filldir, dirent);
}
- EXIT;
return result;
}
fake_file->f_pos = coda_file->f_pos;
fake_file->f_version = coda_file->f_version;
fake_file->f_op = cont_dentry->d_inode->i_fop;
+ fake_file->f_flags = coda_file->f_flags;
return ;
}
int string_offset = (int) (&((struct venus_dirent *)(0))->d_name);
int i;
- ENTRY;
-
CODA_ALLOC(buff, char *, DIR_BUFSIZE);
if ( !buff ) {
printk("coda_venus_readdir: out of memory.\n");
{
struct inode *inode = de->d_inode;
struct coda_inode_info *cii;
- ENTRY;
if (!inode)
return 1;
struct inode *inode = dentry->d_inode;
struct coda_inode_info *cii = ITOC(inode);
- ENTRY;
CDEBUG(D_INODE, "revalidating: %*s/%*s\n",
dentry->d_name.len, dentry->d_name.name,
dentry->d_parent->d_name.len, dentry->d_parent->d_name.name);
struct coda_inode_info *cii;
lock_kernel();
- ENTRY;
coda_vfs_stat.open++;
CDEBUG(D_SPECIAL, "OPEN inode number: %ld, count %d, flags %o.\n",
fh->f_dentry->d_inode->i_ino,
atomic_read(&fh->f_dentry->d_inode->i_count),
fh->f_dentry->d_inode->i_op);
- EXIT;
unlock_kernel();
return 0;
}
struct inode *cinode, *inode;
int err = 0, fcnt;
- ENTRY;
coda_vfs_stat.flush++;
/* No need to make an upcall when we have not made any modifications
int err = 0;
lock_kernel();
- ENTRY;
coda_vfs_stat.release++;
if (!use_coda_close) {
struct inode *cinode, *inode = dentry->d_inode;
struct coda_inode_info *cii = ITOC(inode);
int err = 0;
- ENTRY;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
ViceFid fid;
int error;
int idx;
- ENTRY;
idx = get_device_index((struct coda_mount_data *) data);
vc = &coda_comms[idx];
if (!vc->vc_inuse) {
printk("coda_read_super: No pseudo device\n");
- EXIT;
return NULL;
}
if ( vc->vc_sb ) {
printk("coda_read_super: Device already mounted\n");
- EXIT;
return NULL;
}
sbi = kmalloc(sizeof(struct coda_sb_info), GFP_KERNEL);
if(!sbi) {
- EXIT;
return NULL;
}
printk("coda_read_super: rootinode is %ld dev %d\n",
root->i_ino, root->i_dev);
sb->s_root = d_alloc_root(root);
- EXIT;
return sb;
error:
- EXIT;
if (sbi) {
kfree(sbi);
if(vc)
{
struct coda_sb_info *sbi;
- ENTRY;
-
sbi = coda_sbp(sb);
sbi->sbi_vcomm->vc_sb = NULL;
list_del_init(&sbi->sbi_cihead);
printk("Coda: Bye bye.\n");
kfree(sbi);
-
- EXIT;
}
/* all filling in of inodes postponed until lookup */
{
struct coda_sb_info *sbi = coda_sbp(inode->i_sb);
struct coda_inode_info *cii;
- ENTRY;
if (!sbi) BUG();
{
struct coda_inode_info *cii = ITOC(inode);
- ENTRY;
CDEBUG(D_SUPER, " inode->ino: %ld, count: %d\n",
inode->i_ino, atomic_read(&inode->i_count));
CDEBUG(D_DOWNCALL, "clearing inode: %ld, %x\n", inode->i_ino, cii->c_flags);
cii_free(inode->u.generic_ip);
inode->u.generic_ip = NULL;
#endif
-
- EXIT;
}
int coda_notify_change(struct dentry *de, struct iattr *iattr)
struct coda_vattr vattr;
int error;
- ENTRY;
memset(&vattr, 0, sizeof(vattr));
coda_iattr_to_vattr(iattr, &vattr);
}
CDEBUG(D_SUPER, "inode.i_mode %o, error %d\n", inode->i_mode, error);
- EXIT;
return error;
}
/* the coda pioctl inode ops */
static int coda_ioctl_permission(struct inode *inode, int mask)
{
- ENTRY;
-
return 0;
}
struct inode *target_inode = NULL;
struct coda_inode_info *cnp;
- ENTRY;
/* get the Pioctl data arguments from user space */
if (copy_from_user(&data, (int *)user_data, sizeof(data))) {
return -EINVAL;
{
struct venus_comm *vcp;
int idx;
- ENTRY;
lock_kernel();
idx = MINOR(inode->i_rdev);
CDEBUG(D_PSDEV, "device %i - inuse: %d\n", idx, vcp->vc_inuse);
- EXIT;
unlock_kernel();
return 0;
}
struct venus_comm *vcp = (struct venus_comm *) file->private_data;
struct upc_req *req;
struct list_head *lh, *next;
- ENTRY;
lock_kernel();
if ( !vcp->vc_inuse ) {
}
CDEBUG(D_PSDEV, "Done.\n");
- EXIT;
unlock_kernel();
return 0;
}
MODULE_AUTHOR("Peter J. Braam <braam@cs.cmu.edu>");
+MODULE_LICENSE("GPL");
static int __init init_coda(void)
{
{
int err;
- ENTRY;
-
err = unregister_filesystem(&coda_fs_type);
if ( err != 0 ) {
printk("coda: failed to unregister filesystem\n");
char tmpbuf[80];
int tmplen = 0;
- ENTRY;
/* this works as long as we are below 1024 characters! */
if ( offset < 80 )
len += sprintf( buffer,"%-79s\n", "Coda upcall statistics");
len = length;
if ( len < 0 )
len = 0;
- EXIT;
+
return len;
}
union inputArgs *inp;
union outputArgs *outp;
int insize, outsize, error;
- ENTRY;
insize = SIZE(root);
UPARG(CODA_ROOT);
}
CODA_FREE(inp, insize);
- EXIT;
return error;
}
union inputArgs *inp;
union outputArgs *outp;
int insize, outsize, error;
- ENTRY;
insize = SIZE(getattr);
UPARG(CODA_GETATTR);
*attr = outp->coda_getattr.attr;
CODA_FREE(inp, insize);
- EXIT;
return error;
}
}
CDEBUG(D_INODE, " result %d\n",error);
- EXIT;
CODA_FREE(inp, insize);
return error;
}
error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
CDEBUG(D_INODE, " result %d\n",error);
- EXIT;
CODA_FREE(inp, insize);
return error;
}
error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
CDEBUG(D_INODE, " result %d\n",error);
- EXIT;
CODA_FREE(inp, insize);
return error;
}
error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
- EXIT;
return error;
}
}
CDEBUG(D_INODE, " result %d\n",error);
- EXIT;
CODA_FREE(inp, insize);
return error;
}
struct upc_req *req;
int error = 0;
- ENTRY;
-
vcommp = sbi->sbi_vcomm;
if ( !vcommp->vc_inuse ) {
printk("No pseudo device in upcall comms at %p\n", vcommp);
#include <linux/types.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
+#include <linux/pagemap.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
unsigned int bv_offset;
};
+/*
+ * weee, c forward decl...
+ */
+struct bio;
+typedef int (bio_end_io_t) (struct bio *, int);
+typedef void (bio_destructor_t) (struct bio *);
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
struct bio {
sector_t bi_sector;
struct bio *bi_next; /* request queue link */
- atomic_t bi_cnt; /* pin count */
kdev_t bi_dev; /* will be block device */
unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
- unsigned int bi_vcnt; /* how may bio_vec's */
+ unsigned int bi_vcnt; /* how many bio_vec's */
unsigned int bi_idx; /* current index into bvl_vec */
unsigned int bi_size; /* total size in bytes */
unsigned int bi_max; /* max bvl_vecs we can hold,
struct bio_vec *bi_io_vec; /* the actual vec list */
- int (*bi_end_io)(struct bio *bio, int nr_sectors);
+ bio_end_io_t *bi_end_io;
+ atomic_t bi_cnt; /* pin count */
+
void *bi_private;
- void (*bi_destructor)(struct bio *); /* destructor */
+ bio_destructor_t *bi_destructor; /* destructor */
};
/*
*/
#define BIO_RW 0
#define BIO_RW_AHEAD 1
-#define BIO_BARRIER 2
+#define BIO_RW_BARRIER 2
/*
* various member access, note that bio_data should of course not be used
* on highmem page vectors
*/
-#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(bio)->bi_idx]))
+#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio) bio_iovec((bio))->bv_page
#define __bio_offset(bio, idx) bio_iovec_idx((bio), (idx))->bv_offset
/*
* merge helpers etc
*/
-#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_idx - 1)
+#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define BIO_CONTIG(bio, nxt) \
- (bvec_to_phys(__BVEC_END((bio)) + (bio)->bi_size) == bio_to_phys((nxt)))
+ (bvec_to_phys(__BVEC_END((bio))) + (bio)->bi_size == bio_to_phys((nxt)))
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
__BIO_SEG_BOUNDARY(bvec_to_phys(__BVEC_END((b1))), bio_to_phys((b2)) + (b2)->bi_size, (q)->seg_boundary_mask)
-typedef int (bio_end_io_t) (struct bio *, int);
-typedef void (bio_destructor_t) (struct bio *);
-
#define bio_io_error(bio) bio_endio((bio), 0, bio_sectors((bio)))
#define bio_for_each_segment(bvl, bio, i) \
#include <linux/compiler.h>
/*
- * Initialization functions.
+ * get rid of this next...
*/
-extern int isp16_init(void);
-extern int cdu31a_init(void);
-extern int acsi_init(void);
-extern int mcd_init(void);
-extern int mcdx_init(void);
-extern int sbpcd_init(void);
-extern int aztcd_init(void);
-extern int sony535_init(void);
-extern int gscd_init(void);
-extern int cm206_init(void);
-extern int optcd_init(void);
-extern int sjcd_init(void);
-extern int cdi_init(void);
-extern int hd_init(void);
extern int ide_init(void);
-extern int xd_init(void);
-extern int mfm_init(void);
-extern int loop_init(void);
-extern int md_init(void);
-extern int ap_init(void);
-extern int ddv_init(void);
-extern int z2_init(void);
-extern int swim3_init(void);
-extern int swimiop_init(void);
-extern int amiga_floppy_init(void);
-extern int atari_floppy_init(void);
-extern int ez_init(void);
-extern int bpcd_init(void);
-extern int ps2esdi_init(void);
-extern int jsfd_init(void);
-extern int viodasd_init(void);
-extern int viocd_init(void);
-
-#if defined(CONFIG_ARCH_S390)
-extern int dasd_init(void);
-extern int xpram_init(void);
-extern int tapeblock_init(void);
-#endif /* CONFIG_ARCH_S390 */
extern void set_device_ro(kdev_t dev,int flag);
-void add_blkdev_randomness(int major);
-
-extern int floppy_init(void);
-extern void rd_load(void);
-extern int rd_init(void);
-extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
-extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_image_start; /* starting block # of image */
+extern void add_blkdev_randomness(int major);
#ifdef CONFIG_BLK_DEV_INITRD
void initrd_init(void);
#endif
-
-
/*
* end_request() and friends. Must be called with the request queue spinlock
* acquired. All functions called within end_request() _must_be_ atomic.
* code duplication in drivers.
*/
+extern int end_that_request_first(struct request *, int, int);
+extern void end_that_request_last(struct request *);
+
static inline void blkdev_dequeue_request(struct request *req)
{
list_del(&req->queuelist);
}
-int end_that_request_first(struct request *, int uptodate, int nr_sectors);
-void end_that_request_last(struct request *);
+#define __elv_next_request(q) (q)->elevator.elevator_next_req_fn((q))
+
+extern inline struct request *elv_next_request(request_queue_t *q)
+{
+ struct request *rq;
+
+ while ((rq = __elv_next_request(q))) {
+ rq->flags |= REQ_STARTED;
+
+ if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+ break;
+
+ /*
+ * all ok, break and return it
+ */
+ if (!q->prep_rq_fn(q, rq))
+ break;
+
+ /*
+ * prep said no-go, kill it
+ */
+ blkdev_dequeue_request(rq);
+ if (end_that_request_first(rq, 0, rq->nr_sectors))
+ BUG();
+
+ end_that_request_last(rq);
+ }
+
+ return rq;
+}
+
+extern inline void elv_add_request(request_queue_t *q, struct request *rq)
+{
+ blk_plug_device(q);
+ q->elevator.elevator_add_req_fn(q, rq, q->queue_head.prev);
+}
#if defined(MAJOR_NR) || defined(IDE_DRIVER)
#define CLEAR_INTR
#endif
-#define INIT_REQUEST \
- if (QUEUE_EMPTY) { \
- CLEAR_INTR; \
- return; \
- } \
- if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
- panic(DEVICE_NAME ": request list destroyed"); \
- if (!CURRENT->bio) \
- panic(DEVICE_NAME ": no bio"); \
+#define INIT_REQUEST \
+ if (QUEUE_EMPTY) { \
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (!CURRENT->bio) \
+ panic(DEVICE_NAME ": no bio"); \
#endif /* !defined(IDE_DRIVER) */
struct elevator_s;
typedef struct elevator_s elevator_t;
+struct request_list {
+ unsigned int count;
+ struct list_head free;
+ wait_queue_head_t wait;
+};
+
struct request {
struct list_head queuelist; /* looking for ->queue? you must _not_
* access it directly, use
* blkdev_dequeue_request! */
int elevator_sequence;
- int inactive; /* driver hasn't seen it yet */
+ unsigned char cmd[16];
+
+ unsigned long flags; /* see REQ_ bits below */
int rq_status; /* should split this into a few status bits */
kdev_t rq_dev;
- int cmd; /* READ or WRITE */
int errors;
sector_t sector;
unsigned long nr_sectors;
- unsigned long hard_sector, hard_nr_sectors;
+ unsigned long hard_sector; /* the hard_* are block layer
+ * internals, no driver should
+ * touch them
+ */
+ unsigned long hard_nr_sectors;
unsigned short nr_segments;
unsigned short nr_hw_segments;
unsigned int current_nr_sectors;
struct completion *waiting;
struct bio *bio, *biotail;
request_queue_t *q;
+ struct request_list *rl;
};
+/*
+ * first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+ __REQ_RW, /* not set, read. set, write */
+ __REQ_RW_AHEAD, /* READA */
+ __REQ_BARRIER, /* may not be passed */
+ __REQ_CMD, /* is a regular fs rw request */
+ __REQ_NOMERGE, /* don't touch this for merging */
+ __REQ_STARTED, /* drive already may have started this one */
+ __REQ_DONTPREP, /* don't call prep for this one */
+ /*
+ * for IDE
+ */
+ __REQ_DRIVE_CMD,
+ __REQ_DRIVE_TASK,
+
+ __REQ_PC, /* packet command (special) */
+ __REQ_BLOCK_PC, /* queued down pc from block layer */
+	__REQ_SENSE,		/* sense retrieval */
+
+ __REQ_SPECIAL, /* driver special command */
+
+ __REQ_NR_BITS, /* stops here */
+};
+
+#define REQ_RW (1 << __REQ_RW)
+#define REQ_RW_AHEAD (1 << __REQ_RW_AHEAD)
+#define REQ_BARRIER (1 << __REQ_BARRIER)
+#define REQ_CMD (1 << __REQ_CMD)
+#define REQ_NOMERGE (1 << __REQ_NOMERGE)
+#define REQ_STARTED (1 << __REQ_STARTED)
+#define REQ_DONTPREP (1 << __REQ_DONTPREP)
+#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
+#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
+#define REQ_PC (1 << __REQ_PC)
+#define REQ_SENSE (1 << __REQ_SENSE)
+#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
+#define REQ_SPECIAL (1 << __REQ_SPECIAL)
+
#include <linux/elevator.h>
typedef int (merge_request_fn) (request_queue_t *, struct request *,
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
+typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_device_fn) (void *q);
enum blk_queue_state {
*/
#define QUEUE_NR_REQUESTS 8192
-struct request_list {
- unsigned int count;
- struct list_head free;
- wait_queue_head_t wait;
-};
-
struct request_queue
{
/*
struct list_head queue_head;
elevator_t elevator;
- request_fn_proc * request_fn;
- merge_request_fn * back_merge_fn;
- merge_request_fn * front_merge_fn;
- merge_requests_fn * merge_requests_fn;
- make_request_fn * make_request_fn;
+ request_fn_proc *request_fn;
+ merge_request_fn *back_merge_fn;
+ merge_request_fn *front_merge_fn;
+ merge_requests_fn *merge_requests_fn;
+ make_request_fn *make_request_fn;
+ prep_rq_fn *prep_rq_fn;
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
*/
- void * queuedata;
+ void *queuedata;
/*
* queue needs bounce pages for pages above this limit
#define QUEUE_FLAG_CLUSTER 2 /* cluster several segments into 1 */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
-
#define blk_mark_plugged(q) set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
-
#define blk_queue_empty(q) elv_queue_empty(q)
-
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+#define rq_data_dir(rq) ((rq)->flags & 1)
+
/*
* noop, requests are automagically marked as active/inactive by I/O
* scheduler -- see elv_next_request
extern unsigned long blk_max_low_pfn, blk_max_pfn;
-#define __elv_next_request(q) (q)->elevator.elevator_next_req_fn((q))
-
-extern inline struct request *elv_next_request(request_queue_t *q)
-{
- struct request *rq = __elv_next_request(q);
-
- if (rq) {
- rq->inactive = 0;
- wmb();
- }
-
- return rq;
-}
-
#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
#endif /* CONFIG_HIGHMEM */
#define rq_for_each_bio(bio, rq) \
- for (bio = (rq)->bio; bio; bio = bio->bi_next)
+ if ((rq->bio)) \
+ for (bio = (rq)->bio; bio; bio = bio->bi_next)
struct blk_dev_struct {
/*
extern inline request_queue_t *blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern struct request *blk_get_request(request_queue_t *, int, int);
+extern void blk_put_request(struct request *);
+extern void blk_plug_device(request_queue_t *);
+
+extern int block_ioctl(kdev_t, unsigned int, unsigned long);
/*
* Access functions for manipulating queue properties
extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(void *);
extern int * blk_size[MAX_BLKDEV];
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev)
-extern void drive_stat_acct (kdev_t dev, int rw,
- unsigned long nr_sectors, int new_io);
+extern void drive_stat_acct(struct request *, int, int);
extern inline void blk_clear(int major)
{
printk(format, ## a); } \
} while (0)
-#define ENTRY \
- if(coda_print_entry) printk("Process %d entered %s\n",current->pid,__FUNCTION__)
-
-#define EXIT \
- if(coda_print_entry) printk("Process %d leaving %s\n",current->pid,__FUNCTION__)
-
#define CODA_ALLOC(ptr, cast, size) \
do { \
if (size < PAGE_SIZE) { \