static char *rq_flags[] = {
"REQ_RW",
- "REQ_RW_AHEAD",
+ "REQ_FAILFAST",
"REQ_SOFTBARRIER",
"REQ_HARDBARRIER",
"REQ_CMD",
"REQ_DRIVE_CMD",
"REQ_DRIVE_TASK",
"REQ_DRIVE_TASKFILE",
+ "REQ_PREEMPT",
+ "REQ_PM_SUSPEND",
+ "REQ_PM_RESUME",
+ "REQ_PM_SHUTDOWN",
};
void blk_dump_rq_flags(struct request *rq, char *msg)
static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
- int el_ret, rw, nr_sectors, cur_nr_sectors, barrier;
+ int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra;
struct list_head *insert_here;
sector_t sector;
barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw);
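+ /* read-ahead if the bio is flagged READA or the submitting task has PF_READAHEAD set */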
+ ra = bio_flagged(bio, BIO_RW_AHEAD) || (current->flags & PF_READAHEAD);
+
again:
insert_here = NULL;
spin_lock_irq(q->queue_lock);
/*
- * READA bit set
+ * READA bit set, or the task is marked PF_READAHEAD
*/
- if (bio_flagged(bio, BIO_RW_AHEAD))
+ if (ra)
goto end_io;
freereq = get_request_wait(q, rw);
if (barrier)
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+ /*
+ * don't stack up retries for read ahead
+ */
+ if (ra)
+ req->flags |= REQ_FAILFAST;
+
req->errors = 0;
req->hard_sector = req->sector = sector;
req->hard_nr_sectors = req->nr_sectors = nr_sectors;
} else if (blk_fs_request(rq)) {
/* Handle errors from READ and WRITE requests. */
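+ /* don't retry failfast requests, end them with an error right away */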
+ if (blk_noretry_request(rq))
+ cdrom_end_request(drive, 0);
+
if (sense_key == NOT_READY) {
/* Tray open. */
cdrom_saw_media_change (drive);
/* force an abort */
hwif->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
}
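+ /* fail failfast requests on the first error instead of retrying up to ERROR_MAX */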
- if (rq->errors >= ERROR_MAX)
+ if (rq->errors >= ERROR_MAX || blk_noretry_request(rq))
DRIVER(drive)->end_request(drive, 0, 0);
else {
if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
#if (DISK_RECOVERY_TIME > 0)
-Error So the User Has To Fix the Compilation And Stop Hacking Port 0x43
-Does anyone ever use this anyway ??
+#error So the User Has To Fix the Compilation And Stop Hacking Port 0x43. Does anyone ever use this anyway ??
/*
* For really screwy hardware (hey, at least it *can* be used with Linux)
if (!nr_sectors)
nr_sectors = rq->hard_cur_sectors;
+ /*
+ * if failfast is set on a request, override number of sectors and
+ * complete the whole request right now
+ */
+ if (blk_noretry_request(rq) && !uptodate)
+ nr_sectors = rq->hard_nr_sectors;
+
/*
* decide whether to reenable DMA -- 3 is a random magic for now,
* if we DMA timeout more than 3 times, just stay in PIO
mp_bh->bio = *bio;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
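+ /* let the lower driver fail fast so multipath can quickly retry on another path */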
+ mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
* bit 0 -- read (not set) or write (set)
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
+ * bit 3 -- fail fast, don't want low level driver retries
*/
#define BIO_RW 0
#define BIO_RW_AHEAD 1
#define BIO_RW_BARRIER 2
+#define BIO_RW_FAILFAST 3
/*
* various member access, note that bio_data should of course not be used
* first three bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
- __REQ_RW, /* not set, read. set, write */
- __REQ_RW_AHEAD, /* READA */
+ __REQ_RW, /* not set, read. set, write */
+ __REQ_FAILFAST, /* no low level driver retries */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
- __REQ_CMD, /* is a regular fs rw request */
- __REQ_NOMERGE, /* don't touch this for merging */
- __REQ_STARTED, /* drive already may have started this one */
- __REQ_DONTPREP, /* don't call prep for this one */
- __REQ_QUEUED, /* uses queueing */
+ __REQ_CMD, /* is a regular fs rw request */
+ __REQ_NOMERGE, /* don't touch this for merging */
+ __REQ_STARTED, /* drive already may have started this one */
+ __REQ_DONTPREP, /* don't call prep for this one */
+ __REQ_QUEUED, /* uses queueing */
/*
* for ATA/ATAPI devices
*/
- __REQ_PC, /* packet command (special) */
- __REQ_BLOCK_PC, /* queued down pc from block layer */
- __REQ_SENSE, /* sense retrival */
+ __REQ_PC, /* packet command (special) */
+ __REQ_BLOCK_PC, /* queued down pc from block layer */
+ __REQ_SENSE, /* sense retrieval */
- __REQ_FAILED, /* set if the request failed */
- __REQ_QUIET, /* don't worry about errors */
- __REQ_SPECIAL, /* driver suplied command */
+ __REQ_FAILED, /* set if the request failed */
+ __REQ_QUIET, /* don't worry about errors */
+ __REQ_SPECIAL, /* driver supplied command */
__REQ_DRIVE_CMD,
__REQ_DRIVE_TASK,
__REQ_DRIVE_TASKFILE,
__REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
- __REQ_NR_BITS, /* stops here */
+ __REQ_NR_BITS, /* stops here */
};
#define REQ_RW (1 << __REQ_RW)
-#define REQ_RW_AHEAD (1 << __REQ_RW_AHEAD)
+#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_CMD (1 << __REQ_CMD)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
+#define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST)
#define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME)
#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
#define PF_LESS_THROTTLE 0x01000000 /* Throttle me less: I clean memory */
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
+#define PF_READAHEAD 0x00400000 /* I am doing read-ahead */
#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, unsigned long new_mask);
{
unsigned page_idx;
struct pagevec lru_pvec;
+ int ret = 0;
+
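+ /* flag the task so __make_request treats the resulting I/O as read-ahead */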
+ current->flags |= PF_READAHEAD;
- if (mapping->a_ops->readpages)
- return mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
+ if (mapping->a_ops->readpages) {
+ ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
+ goto out;
+ }
pagevec_init(&lru_pvec, 0);
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
}
}
pagevec_lru_add(&lru_pvec);
- return 0;
+out:
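+ /* clear the read-ahead flag on every return path */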
+ current->flags &= ~PF_READAHEAD;
+ return ret;
}
/*