static void do_stram_request(request_queue_t *q)
{
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
void *start = swap_start + (req->sector << 9);
unsigned long len = req->current_nr_sectors << 9;
if ((start + len) > swap_end) {
{
printk("FDC1772: fd_error\n");
/*panic("fd1772: fd_error"); *//* DAG tmp */
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
CURRENT->errors++;
if (CURRENT->errors >= MAX_ERRORS) {
DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
- !blk_queue_empty(QUEUE) ? CURRENT->sector : 0));
+ CURRENT ? CURRENT->sector : 0));
repeat:
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
goto the_end;
floppy = CURRENT->rq_disk->private_data;
/* No - its the end of the line */
/* end_request's should have happened at the end of sector DMAs */
/* Turns Drive LEDs off - may slow it down? */
- if (blk_queue_empty(QUEUE))
+ if (!elv_next_request(QUEUE))
issue_command(CMD_CKV, block, 2);
Busy = 0;
DBG("mfm_request: loop start\n");
sti();
- DBG("mfm_request: before blk_queue_empty\n");
+ DBG("mfm_request: before !CURRENT\n");
- if (blk_queue_empty(QUEUE)) {
+ if (!CURRENT) {
printk("mfm_request: Exiting due to empty queue (pre)\n");
do_mfm = NULL;
Busy = 0;
return false;
while (true) {
- if (blk_queue_empty(RequestQueue))
- return false;
-
Request = elv_next_request(RequestQueue);
+ if (!Request)
+ return false;
+
Command = DAC960_AllocateCommand(Controller);
if (Command != NULL)
break;
static void bad_rw_intr( void )
{
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
if (++CURRENT->errors >= MAX_ERRORS)
do_acsi = NULL;
printk( KERN_ERR "ACSI timeout\n" );
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
if (++CURRENT->errors >= MAX_ERRORS) {
#ifdef DEBUG
if (do_acsi)
return;
- if (blk_queue_empty(QUEUE)) {
+ if (!CURRENT) {
do_acsi = NULL;
ENABLE_IRQ();
stdma_release();
unsigned long flags;
repeat:
- if (blk_queue_empty(QUEUE)) {
+ if (!CURRENT) {
/* Nothing left to do */
return;
}
return;
}
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
CURRENT->errors++;
struct atari_floppy_struct *floppy;
DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
- CURRENT, !blk_queue_empty(QUEUE) ? CURRENT->rq_disk->disk_name : "",
- !blk_queue_empty(QUEUE) ? CURRENT->sector : 0 ));
+ CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
+ CURRENT ? CURRENT->sector : 0 ));
IsFormatting = 0;
repeat:
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
goto the_end;
floppy = CURRENT->rq_disk->private_data;
goto startio;
queue:
- if (blk_queue_empty(q))
+ creq = elv_next_request(q);
+ if (!creq)
goto startio;
- creq = elv_next_request(q);
if (creq->nr_phys_segments > MAXSGENTRIES)
BUG();
goto startio;
queue_next:
- if (blk_queue_empty(q))
+ creq = elv_next_request(q);
+ if (!creq)
goto startio;
- creq = elv_next_request(q);
if (creq->nr_phys_segments > SG_MAX)
BUG();
again:
insert_here = NULL;
- if (blk_queue_empty(q)) {
+ if (elv_queue_empty(q)) {
blk_plug_device(q);
goto get_rq;
}
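
The driver conversions that follow all apply the same idiom: rather than guarding the loop with blk_queue_empty() and then fetching the head request separately, the request is fetched once and the loop runs until elv_next_request() returns NULL, i.e. until the elevator has nothing ready to dispatch. A minimal sketch of the converted shape, for a hypothetical mydev driver (the names and the trivial completion are illustrative only, not part of this patch):

        static void do_mydev_request(request_queue_t *q)
        {
                struct request *req;

                /* elv_next_request() returns NULL once nothing is ready
                 * to dispatch, so no separate emptiness test is needed
                 * before dereferencing the head request. */
                while ((req = elv_next_request(q)) != NULL) {
                        /* ... service req->sector / req->current_nr_sectors ... */
                        end_request(req, 1);
                }
        }
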
static void do_nbd_request(request_queue_t * q)
{
-
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
struct nbd_device *lo;
if (!(req->flags & REQ_CMD))
if (pcd_busy)
return;
while (1) {
- if (blk_queue_empty(q))
- return;
pcd_req = elv_next_request(q);
+ if (!pcd_req)
+ return;
+
if (rq_data_dir(pcd_req) == READ) {
struct pcd_unit *cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
if (pd_busy)
return;
repeat:
- if (blk_queue_empty(q))
+ pd_req = elv_next_request(q);
+ if (!pd_req)
return;
- pd_req = elv_next_request(q);
pd_block = pd_req->sector;
pd_run = pd_req->nr_sectors;
pd_count = pd_req->current_nr_sectors;
/* since, this routine is called with interrupts cleared - they
must be before it finishes */
- /* standard procedure to ensure that requests are really on the
- list + sanity checks. */
- if (blk_queue_empty(q))
- return;
-
req = elv_next_request(q);
+ if (!req)
+ return;
#if 0
printk("%s:got request. device : %s command : %d sector : %ld count : %ld, buffer: %p\n",
static void start_request(struct floppy_state *fs)
{
+ struct request *req;
unsigned long x;
if (fs->state == idle && fs->wanted) {
wake_up(&fs->wait);
return;
}
- while (!blk_queue_empty(&swim3_queue) && fs->state == idle) {
- struct request *req = elv_next_request(&swim3_queue);
+ while (fs->state == idle && (req = elv_next_request(&swim3_queue))) {
#if 0
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
req->rq_disk->disk_name, req->cmd,
wake_up(&fs->wait);
return;
}
- while (!blk_queue_empty(&swim_queue) && fs->state == idle) {
+ while (CURRENT && fs->state == idle) {
if (CURRENT->bh && !buffer_locked(CURRENT->bh))
panic("floppy: block not locked");
#if 0
/* do_xd_request: handle an incoming request */
static void do_xd_request (request_queue_t * q)
{
+ struct request *req;
+
if (xdc_busy)
return;
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ while ((req = elv_next_request(q)) != NULL) {
unsigned block = req->sector;
unsigned count = req->nr_sectors;
int rw = rq_data_dir(req);
static void do_z2_request(request_queue_t *q)
{
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ struct request *req;
+ while ((req = elv_next_request(q)) != NULL) {
unsigned long start = req->sector << 9;
unsigned long len = req->current_nr_sectors << 9;
static int current_valid(void)
{
- return !blk_queue_empty(QUEUE) &&
+ return CURRENT &&
CURRENT->cmd == READ &&
CURRENT->sector != -1;
}
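
The older drivers converted here still go through the per-driver CURRENT macro rather than a local request pointer. In this era CURRENT is typically defined (per driver, or via drivers/block/blk.h) as elv_next_request(QUEUE), so testing !CURRENT is the drop-in replacement for blk_queue_empty(QUEUE), and CURRENT can be dereferenced immediately after a successful test. A hedged sketch of the assumed plumbing, with hypothetical names:

        /* assumed per-driver definitions; mydev_queue is illustrative */
        #define QUEUE   (&mydev_queue)
        #define CURRENT elv_next_request(QUEUE)

        static void mydev_error(void)
        {
                if (!CURRENT)           /* was: if (blk_queue_empty(QUEUE)) */
                        return;
                CURRENT->errors++;      /* safe: head request exists here */
        }
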
* The beginning here is stolen from the hard disk driver. I hope
* it's right.
*/
- if (blk_queue_empty(q))
+ req = elv_next_request(q);
+ if (!req)
goto end_do_cdu31a_request;
if (!sony_spun_up)
scd_spinup();
- req = elv_next_request(q);
block = req->sector;
nblock = req->nr_sectors;
struct request *req;
while (1) { /* repeat until all requests have been satisfied */
- if (blk_queue_empty(q))
+ req = elv_next_request(q);
+ if (!req)
return;
- req = elv_next_request(q);
if (req->cmd != READ) {
debug(("Non-read command %d on cdrom\n", req->cmd));
end_request(req, 0);
unsigned int nsect;
repeat:
- if (blk_queue_empty(&gscd_queue))
+ req = elv_next_request(&gscd_queue);
+ if (!req)
return;
- req = elv_next_request(&gscd_queue);
block = req->sector;
nsect = req->nr_sectors;
static int current_valid(void)
{
- return !blk_queue_empty(QUEUE) &&
+ return CURRENT &&
CURRENT->cmd == READ &&
CURRENT->sector != -1;
}
again:
- if (blk_queue_empty(q))
+ req = elv_next_request(q);
+ if (!req)
return;
- req = elv_next_request(q);
stuffp = req->rq_disk->private_data;
if (!stuffp->present) {
static int current_valid(void)
{
- return !blk_queue_empty(QUEUE) &&
+ return CURRENT &&
CURRENT->cmd == READ &&
CURRENT->sector != -1;
}
#ifdef DEBUG_GTL
xnr=++xx_nr;
- if(blk_queue_empty(q))
+ req = elv_next_request(q);
+
+ if (!req)
{
printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n",
xnr, current->pid, jiffies);
return;
}
- req = elv_next_request(q);
-
printk(" do_sbpcd_request[%di](%p:%ld+%ld), Pid:%d, Time:%li\n",
xnr, req, req->sector, req->nr_sectors, current->pid, jiffies);
#endif
- if (blk_queue_empty(q))
+
+ req = elv_next_request(q); /* take out our request so no other */
+ if (!req)
return;
- req = elv_next_request(q); /* take out our request so no other */
if (req -> sector == -1)
end_request(req, 0);
spin_unlock_irq(q->queue_lock);
static int current_valid(void)
{
- return !blk_queue_empty(QUEUE) &&
+ return CURRENT &&
CURRENT->cmd == READ &&
CURRENT->sector != -1;
}
Byte cmd[2];
while (1) {
- if (blk_queue_empty(q))
+ req = elv_next_request(q);
+ if (!req)
return;
- req = elv_next_request(q);
block = req->sector;
nsect = req->nr_sectors;
if (!(req->flags & REQ_CMD))
best = NULL;
drive = hwgroup->drive;
do {
- if (!blk_queue_empty(&drive->queue) && (!drive->sleep || time_after_eq(jiffies, drive->sleep))) {
+ if ((!drive->sleep || time_after_eq(jiffies, drive->sleep))
+ && !elv_queue_empty(&drive->queue)) {
if (!best
|| (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
|| (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
static void dump_status (const char *msg, unsigned int stat)
{
char *name = "hd?";
- if (!blk_queue_empty(QUEUE))
+ if (CURRENT)
name = CURRENT->rq_disk->disk_name;
#ifdef VERBOSE_ERRORS
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
- if (!blk_queue_empty(QUEUE))
+ if (CURRENT)
printk(", sector=%ld", CURRENT->sector);
}
printk("\n");
*/
static void bad_rw_intr(void)
{
- if (!blk_queue_empty(QUEUE)) {
- struct request *req = CURRENT;
+ struct request *req = CURRENT;
+ if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
end_request(req, 0);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (!blk_queue_empty(QUEUE))
+ if (elv_next_request(QUEUE))
hd_request();
return;
}
do_hd = NULL;
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
disable_irq(HD_IRQ);
del_timer(&device_timer);
local_irq_enable();
- if (blk_queue_empty(QUEUE)) {
+ req = CURRENT;
+ if (!req) {
do_hd = NULL;
return;
}
- req = CURRENT;
if (reset) {
local_irq_disable();
{
char devc;
- devc = !blk_queue_empty(QUEUE) ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
+ devc = CURRENT ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
#ifdef VERBOSE_ERRORS
printk("hd%c: %s: status=0x%02x { ", devc, msg, stat & 0xff);
if (stat & BUSY_STAT) printk("Busy ");
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
- if (!blk_queue_empty(QUEUE))
+ if (CURRENT)
printk(", sector=%ld", CURRENT->sector);
}
printk("\n");
{
int dev;
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
dev = DEVICE_NR(CURRENT->rq_dev);
if (++CURRENT->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (!blk_queue_empty(QUEUE))
+ if (CURRENT)
hd_request();
return;
}
do_hd = NULL;
- if (blk_queue_empty(QUEUE))
+ if (!CURRENT)
return;
disable_irq(HD_IRQ);
del_timer(&device_timer);
local_irq_enable();
- if (blk_queue_empty(QUEUE)) {
+ if (!CURRENT) {
do_hd = NULL;
return;
}
struct i2ob_device *dev;
u32 m;
- while (!blk_queue_empty(q)) {
+ while ((req = elv_next_request(q)) != NULL) {
/*
* On an IRQ completion if there is an inactive
* request on the queue head it means it isnt yet
* ready to dispatch.
*/
- req = elv_next_request(q);
-
if(req->rq_status == RQ_INACTIVE)
return;
do {
// sti();
- if (blk_queue_empty(q))
- return;
req = elv_next_request(q);
+ if (!req)
+ return;
part = req->rq_disk->private_data;
if (part) {
ret = 0;
static struct request_queue mtd_queue;
static void handle_mtdblock_request(void)
{
+ struct request *req;
struct mtdblk_dev *mtdblk;
unsigned int res;
- while (!blk_queue_empty(&mtd_queue)) {
- struct request *req = elv_next_request(&mtd_queue);
+ while ((req = elv_next_request(&mtd_queue)) != NULL) {
struct mtdblk_dev **p = req->rq_disk->private_data;
spin_unlock_irq(mtd_queue.queue_lock);
mtdblk = *p;
add_wait_queue(&thr_wq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(mtd_queue.queue_lock);
- if (blk_queue_empty(&mtd_queue) || blk_queue_plugged(&mtd_queue)) {
+ if (!elv_next_request(&mtd_queue) || blk_queue_plugged(&mtd_queue)) {
spin_unlock_irq(mtd_queue.queue_lock);
schedule();
remove_wait_queue(&thr_wq, &wait);
static void mtdblock_request(request_queue_t *q)
{
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
struct mtdro_dev *mdev = req->rq_disk->private_data;
struct mtd_info *mtd = mdev->mtd;
unsigned int res;
void nftl_request(struct request_queue *q)
{
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
unsigned block = req->sector;
unsigned nsect = req->current_nr_sectors;
char *buffer = req->buffer;
nr_queued++;
}
while (!blk_queue_plugged(queue) &&
- !blk_queue_empty(queue) &&
+ elv_next_request(queue) &&
nr_queued < DASD_CHANQ_MAX_SIZE) {
req = elv_next_request(queue);
if (device->ro_flag && rq_data_dir(req) == WRITE) {
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
- !blk_queue_empty(&device->blk_data.request_queue))
+ elv_next_request(&device->blk_data.request_queue))
tasklet_schedule(&device->blk_data.tasklet);
}
list_for_each(l, &device->req_queue)
nr_queued++;
while (!blk_queue_plugged(queue) &&
- !blk_queue_empty(queue) &&
+ elv_next_request(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE) {
req = elv_next_request(queue);
if (rq_data_dir(req) == WRITE) {
struct tape_device *device;
device = (struct tape_device *) queue->queuedata;
- while (!blk_queue_empty(queue)) {
+ while (elv_next_request(queue)) {
INIT_LIST_HEAD(&new_req);
spin_lock(get_ccwdev_lock(device->cdev));
__tape_process_blk_queue(device, &new_req);
struct tape_device *device;
device = (struct tape_device *) data;
- while (!blk_queue_empty(&device->blk_data.request_queue)) {
+ while (elv_next_request(&device->blk_data.request_queue)) {
INIT_LIST_HEAD(&new_req);
spin_lock_irq(get_ccwdev_lock(device->cdev));
__tape_process_blk_queue(device, &new_req);
static void jsfd_do_request(request_queue_t *q)
{
- while (!blk_queue_empty(q)) {
- struct request *req = elv_next_request(q);
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
struct jsfd_part *jdp = req->rq_disk->private_data;
unsigned long offset = req->sector << 9;
size_t len = req->current_nr_sectors << 9;
* with special case code, then spin off separate versions and
* use function pointers to pick the right one.
*/
- if (sdev->single_lun && blk_queue_empty(q) && sdev->device_busy ==0 &&
- !shost->host_blocked && !shost->host_self_blocked &&
- !((shost->can_queue > 0) && (shost->host_busy >=
- shost->can_queue))) {
+ if (sdev->single_lun && sdev->device_busy == 0 &&
+ !shost->host_blocked && !shost->host_self_blocked &&
+ !((shost->can_queue > 0) && (shost->host_busy >= shost->can_queue))
+ && elv_queue_empty(q)) {
list_for_each_entry(sdev2, &sdev->same_target_siblings,
same_target_siblings) {
if (!sdev2->device_blocked &&
- !blk_queue_empty(sdev2->request_queue)) {
+ !elv_queue_empty(sdev2->request_queue)) {
__blk_run_queue(sdev2->request_queue);
break;
}
#define blk_queue_plugged(q) !list_empty(&(q)->plug_list)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_empty(q) elv_queue_empty(q)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
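
With the blk_queue_empty() wrapper removed from blkdev.h, the remaining choices are: test the pointer returned by elv_next_request() when the caller is about to service a request (the common case above), or call elv_queue_empty() directly when only an emptiness test is wanted and no request is dereferenced afterwards (as in the make_request path and the SCSI single_lun hunks above). A short sketch of the latter style, mirroring the again:/get_rq hunk and assuming the caller already holds the queue lock as that path does:

        /* pure emptiness test; no request is fetched or dereferenced */
        if (elv_queue_empty(q)) {
                blk_plug_device(q);     /* nothing queued yet: plug the device */
                goto get_rq;
        }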