return 0;
end_io:
- bio->bi_end_io(bio, nr_sectors);
+ bio->bi_end_io(bio);
return 0;
}
"generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
kdevname(bio->bi_dev), (long long) bio->bi_sector);
end_io:
- bio->bi_end_io(bio, nr_sectors);
+ bio->bi_end_io(bio);
break;
}
/*
* our default bio end_io callback handler for a buffer_head mapping.
*/
-static int end_bio_bh_io_sync(struct bio *bio, int nr_sectors)
+static void end_bio_bh_io_sync(struct bio *bio)
{
struct buffer_head *bh = bio->bi_private;
- BIO_BUG_ON(nr_sectors != (bh->b_size >> 9));
-
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
- return 0;
}
/**
if (!bio->bi_size) {
req->bio = bio->bi_next;
- if (unlikely(bio_endio(bio, uptodate, total_nsect)))
- BUG();
+ bio_endio(bio, uptodate);
total_nsect = 0;
}
* operation and are ready to return a success/failure code to the buffer
* cache layer.
*/
-static int raid_end_bio_io(r1bio_t *r1_bio, int uptodate, int nr_sectors)
+static void raid_end_bio_io(r1bio_t *r1_bio, int uptodate)
{
struct bio *bio = r1_bio->master_bio;
- bio_endio(bio, uptodate, nr_sectors);
+ bio_endio(bio, uptodate);
free_r1bio(r1_bio);
-
- return 0;
}
/*
atomic_dec(&conf->mirrors[disk].nr_pending);
}
-static int end_request(struct bio *bio, int nr_sectors)
+static void end_request(struct bio *bio)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
* we have only one bio on the read side
*/
if (uptodate) {
- raid_end_bio_io(r1_bio, uptodate, nr_sectors);
- return 0;
+ raid_end_bio_io(r1_bio, uptodate);
+ return;
}
/*
* oops, read error:
printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n",
partition_name(bio->bi_dev), r1_bio->sector);
reschedule_retry(r1_bio);
- return 0;
+ return;
}
if (r1_bio->read_bio)
* already.
*/
if (atomic_dec_and_test(&r1_bio->remaining))
- raid_end_bio_io(r1_bio, uptodate, nr_sectors);
- return 0;
+ raid_end_bio_io(r1_bio, uptodate);
}
/*
* If all mirrors are non-operational
* then return an IO error:
*/
- raid_end_bio_io(r1_bio, 0, 0);
+ raid_end_bio_io(r1_bio, 0);
return 0;
}
atomic_set(&r1_bio->remaining, sum_bios);
#define REDIRECT_SECTOR KERN_ERR \
"raid1: %s: redirecting sector %lu to another mirror\n"
-static int end_sync_read(struct bio *bio, int nr_sectors)
+static void end_sync_read(struct bio *bio)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
else
set_bit(R1BIO_Uptodate, &r1_bio->state);
reschedule_retry(r1_bio);
-
- return 0;
}
-static int end_sync_write(struct bio *bio, int nr_sectors)
+static void end_sync_write(struct bio *bio)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
resume_device(conf);
put_buf(r1_bio);
}
- return 0;
}
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
map(mddev, &bio->bi_dev);
if (kdev_same(bio->bi_dev, dev)) {
printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector);
- raid_end_bio_io(r1_bio, 0, 0);
+ raid_end_bio_io(r1_bio, 0);
break;
}
printk(REDIRECT_SECTOR,
return NULL;
}
-static int bio_end_io_kio(struct bio *bio, int nr_sectors)
+static void bio_end_io_kio(struct bio *bio)
{
struct kiobuf *kio = (struct kiobuf *) bio->bi_private;
end_kio_request(kio, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
- return 0;
}
/**
end_kio_request(kio, !err);
}
-int bio_endio(struct bio *bio, int uptodate, int nr_sectors)
+void bio_endio(struct bio *bio, int uptodate)
{
if (uptodate)
set_bit(BIO_UPTODATE, &bio->bi_flags);
else
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (bio->bi_end_io)
- return bio->bi_end_io(bio, nr_sectors);
-
- return 0;
+ bio->bi_end_io(bio);
}
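
(Illustrative sketch only, not part of the patch: with bi_end_io now returning void, a driver-private completion handler takes the shape below. The example_end_io name and the struct completion payload in ->bi_private are assumptions made for the sketch; the structure simply mirrors end_bio_bh_io_sync() above.)

/*
 * Hypothetical completion callback under the new void prototype.
 * ->bi_private is assumed to hold a struct completion supplied by
 * the submitter; a real driver would carry its own state instead.
 */
static void example_end_io(struct bio *bio)
{
	struct completion *event = bio->bi_private;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		printk(KERN_ERR "example: I/O error on %s\n",
			kdevname(bio->bi_dev));

	complete(event);	/* wake up the submitter */
	bio_put(bio);		/* drop the reference taken at allocation */
}
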
static void __init biovec_init_pool(void)
* weee, c forward decl...
*/
struct bio;
-typedef int (bio_end_io_t) (struct bio *, int);
+typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
/*
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
-#define bio_io_error(bio) bio_endio((bio), 0, bio_sectors((bio)))
+#define bio_io_error(bio) bio_endio((bio), 0)
/*
* drivers should not use the __ version unless they _really_ want to
extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);
-extern int bio_endio(struct bio *, int, int);
+extern void bio_endio(struct bio *, int);
struct request_queue;
extern inline int bio_phys_segments(struct request_queue *, struct bio *);
extern inline int bio_hw_segments(struct request_queue *, struct bio *);
}
}
-static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *pool)
+static inline void bounce_end_io(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
- int ret, i;
+ int i;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
goto out_eio;
}
out_eio:
- ret = bio_orig->bi_end_io(bio_orig, nr_sectors);
-
+ bio_orig->bi_end_io(bio_orig);
bio_put(bio);
- return ret;
}
-static int bounce_end_io_write(struct bio *bio, int nr_sectors)
+static void bounce_end_io_write(struct bio *bio)
{
- return bounce_end_io(bio, nr_sectors, page_pool);
+ bounce_end_io(bio, page_pool);
}
-static int bounce_end_io_write_isa(struct bio *bio, int nr_sectors)
+static void bounce_end_io_write_isa(struct bio *bio)
{
- return bounce_end_io(bio, nr_sectors, isa_page_pool);
+ bounce_end_io(bio, isa_page_pool);
}
-static inline int __bounce_end_io_read(struct bio *bio, int nr_sectors,
- mempool_t *pool)
+static inline void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
copy_to_high_bio_irq(bio_orig, bio);
- return bounce_end_io(bio, nr_sectors, pool);
+ bounce_end_io(bio, pool);
}
-static int bounce_end_io_read(struct bio *bio, int nr_sectors)
+static void bounce_end_io_read(struct bio *bio)
{
- return __bounce_end_io_read(bio, nr_sectors, page_pool);
+ __bounce_end_io_read(bio, page_pool);
}
-static int bounce_end_io_read_isa(struct bio *bio, int nr_sectors)
+static void bounce_end_io_read_isa(struct bio *bio)
{
- return __bounce_end_io_read(bio, nr_sectors, isa_page_pool);
+ __bounce_end_io_read(bio, isa_page_pool);
}
void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)