(long *)arg);
case BLKSECTGET:
- if ((q = blk_get_queue(dev)) == NULL)
+ if ((q = bdev_get_queue(bdev)) == NULL)
return -EINVAL;
usval = q->max_sectors;
struct request *rq;
int close = 0, err;
- q = blk_get_queue(to_kdev_t(bdev->bd_dev));
+ q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
int blk_nohighio = 0;
/**
- * blk_get_queue: - return the queue that matches the given device
- * @dev: device
+ * bdev_get_queue: - return the queue that matches the given device
+ * @bdev: device
*
* Description:
 * Given a specific device, return the queue that will hold I/O
 * for it. This is either a &struct blk_dev_struct lookup and the
 * queue it contains, or the default queue stored in the same
 * location.
*
**/
-inline request_queue_t *blk_get_queue(kdev_t dev)
+inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
- struct blk_dev_struct *bdev = blk_dev + major(dev);
-
- if (bdev->queue)
- return bdev->queue(dev);
+ kdev_t dev = to_kdev_t(bdev->bd_dev);
+ struct blk_dev_struct *p = blk_dev + major(dev);
+
+ if (p->queue)
+ return p->queue(dev);
else
return &blk_dev[major(dev)].request_queue;
}
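
For callers the conversion is mechanical: instead of mapping bd_dev back to a
kdev_t and passing that to blk_get_queue(), the struct block_device pointer is
handed to bdev_get_queue() directly. A minimal caller sketch, not part of the
patch (example_check_queue() is a hypothetical name):

	static int example_check_queue(struct block_device *bdev)
	{
		/* old: q = blk_get_queue(to_kdev_t(bdev->bd_dev)); */
		request_queue_t *q = bdev_get_queue(bdev);

		if (!q)
			return -ENXIO;	/* no queue registered for this device */
		return 0;
	}
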
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
struct backing_dev_info *ret = NULL;
- request_queue_t *q = blk_get_queue(to_kdev_t(bdev->bd_dev));
+ request_queue_t *q = bdev_get_queue(bdev);
if (q)
ret = &q->backing_dev_info;
* Stacking drivers are expected to know what they are doing.
*/
do {
- q = blk_get_queue(to_kdev_t(bio->bi_bdev->bd_dev));
+ q = bdev_get_queue(bio->bi_bdev);
if (!q) {
printk(KERN_ERR
"generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
-EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(bdev_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(blk_queue_bounce_limit);
if (!disk_active(disk))
return -ENODEV;
- q = blk_get_queue(rdev->dev);
+ q = bdev_get_queue(rdev->bdev);
if (!q) {
MD_BUG();
return -ENODEV;
sd_max_sectors[k] = MAX_PHYS_SEGMENTS*8;
}
- for (k = 0; k < N_USED_SD_MAJORS; k++) {
- request_queue_t *q = blk_get_queue(mk_kdev(SD_MAJOR(k), 0));
- blk_queue_hardsect_size(q, 512);
- }
-
for (k = 0; k < N_USED_SD_MAJORS; k++) {
int N = SCSI_DISKS_PER_MAJOR;
int set_blocksize(struct block_device *bdev, int size)
{
int oldsize;
- kdev_t dev = to_kdev_t(bdev->bd_dev);
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
return -EINVAL;
/* Size cannot be smaller than the size supported by the device */
- if (size < get_hardsect_size(dev))
+ if (size < bdev_hardsect_size(bdev))
return -EINVAL;
oldsize = bdev->bd_block_size;
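
The two checks in set_blocksize() run from cheap to device-dependent:
(size & (size - 1)) is zero exactly when size has a single bit set, e.g.
4096 & 4095 == 0 but 3072 & 3071 != 0, so non-power-of-two sizes are rejected
without touching the queue; only then is the hardware sector size consulted.
The same validation as a standalone sketch (valid_blocksize() is a
hypothetical helper, assuming bdev_hardsect_size() as introduced by this
patch):

	static inline int valid_blocksize(struct block_device *bdev, int size)
	{
		/* power of two, between 512 bytes and one page */
		if (size > PAGE_SIZE || size < 512 || (size & (size - 1)))
			return 0;
		/* at least as large as the device's hardware sector */
		if (size < bdev_hardsect_size(bdev))
			return 0;
		return 1;
	}
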
extern int wipe_partitions(kdev_t dev);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(struct bio *bio);
-extern inline request_queue_t *blk_get_queue(kdev_t dev);
+extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
extern void blkdev_release_request(struct request *);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
return retval;
}
-extern inline int get_hardsect_size(kdev_t dev)
-{
- return queue_hardsect_size(blk_get_queue(dev));
-}
-
extern inline int bdev_hardsect_size(struct block_device *bdev)
{
- return queue_hardsect_size(blk_get_queue(to_kdev_t(bdev->bd_dev)));
+ return queue_hardsect_size(bdev_get_queue(bdev));
}
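
With get_hardsect_size() gone, bdev_hardsect_size() is simply the composition
of the two remaining helpers, so a caller that previously held only a kdev_t
must now obtain the struct block_device first. A hypothetical use
(example_sectors_per_page() is not part of the patch), assuming the queue
falls back to a 512-byte sector size when none has been set:

	/* hypothetical helper: hardware sectors needed to cover one page */
	static inline unsigned int example_sectors_per_page(struct block_device *bdev)
	{
		return PAGE_SIZE / bdev_hardsect_size(bdev);
	}
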
#define blk_finished_io(nsects) do { } while (0)