The last kdev_t object is gone; ->i_rdev has been switched from kdev_t to dev_t (all kdev_t_to_nr()/to_kdev_t() conversions around i_rdev dropped, mk_kdev/major/minor replaced with MKDEV/MAJOR/MINOR).
int noctty, retval;
struct tty_driver *driver;
int index;
- dev_t device = kdev_t_to_nr(inode->i_rdev);
+ dev_t device = inode->i_rdev;
unsigned short saved_flags = filp->f_flags;
retry_open:
noctty = filp->f_flags & O_NOCTTY;
goto out;
}
- *dev = kdev_t_to_nr(inode->i_rdev);
+ *dev = inode->i_rdev;
out:
path_release(&nd);
bdev->bd_part_count = 0;
bdev->bd_invalidated = 0;
inode->i_mode = S_IFBLK;
- inode->i_rdev = to_kdev_t(dev);
+ inode->i_rdev = dev;
inode->i_bdev = bdev;
inode->i_data.a_ops = &def_blk_aops;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
return 0;
}
spin_unlock(&bdev_lock);
- bdev = bdget(kdev_t_to_nr(inode->i_rdev));
+ bdev = bdget(inode->i_rdev);
if (!bdev)
return -ENOMEM;
spin_lock(&bdev_lock);
struct kobject *kobj;
int idx;
spin_unlock(&cdev_lock);
- kobj = kobj_lookup(cdev_map, kdev_t_to_nr(inode->i_rdev), &idx);
+ kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
if (!kobj)
return -ENODEV;
new = container_of(kobj, struct cdev, kobj);
} else {
cFYI(1, (" Init special inode "));
init_special_inode(tmp_inode, tmp_inode->i_mode,
- kdev_t_to_nr(tmp_inode->i_rdev));
+ tmp_inode->i_rdev);
}
}
} else {
cFYI(1, (" Init special inode "));
init_special_inode(tmp_inode, tmp_inode->i_mode,
- kdev_t_to_nr(tmp_inode->i_rdev));
+ tmp_inode->i_rdev);
}
}
} else {
cFYI(1, (" Init special inode "));
init_special_inode(inode, inode->i_mode,
- kdev_t_to_nr(inode->i_rdev));
+ inode->i_rdev);
}
}
FreeXid(xid);
inode->i_op = &cifs_symlink_inode_ops;
} else {
init_special_inode(inode, inode->i_mode,
- kdev_t_to_nr(inode->i_rdev));
+ inode->i_rdev);
}
}
if(buf)
inode->i_fop = &devfs_fops;
if ( S_ISCHR (de->mode) )
{
- inode->i_rdev = to_kdev_t(de->u.cdev.dev);
+ inode->i_rdev = de->u.cdev.dev;
}
else if ( S_ISBLK (de->mode) )
{
- inode->i_rdev = to_kdev_t(de->u.bdev.dev);
+ inode->i_rdev = de->u.bdev.dev;
if (bd_acquire (inode) != 0)
PRINTK ("(%d): no block device from bdget()\n",(int)inode->i_ino);
}
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
+ raw_inode->i_block[0] = cpu_to_le32(inode->i_rdev);
else for (n = 0; n < EXT2_N_BLOCKS; n++)
raw_inode->i_block[n] = ei->i_data[n];
mark_buffer_dirty(bh);
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_block[0] =
- cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
+ cpu_to_le32(inode->i_rdev);
else for (block = 0; block < EXT3_N_BLOCKS; block++)
raw_inode->i_block[block] = ei->i_data[block];
hpfs_inode->i_ea_mode = 1;
}
if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) {
- int d = kdev_t_to_nr(i->i_rdev);
+ dev_t d = i->i_rdev;
ea[0] = d & 0xff;
ea[1] = (d >> 8) & 0xff;
ea[2] = (d >> 16) & 0xff;
inode->i_pipe = NULL;
inode->i_bdev = NULL;
inode->i_cdev = NULL;
- inode->i_rdev = to_kdev_t(0);
+ inode->i_rdev = 0;
inode->i_security = NULL;
if (security_inode_alloc(inode)) {
if (inode->i_sb->s_op->destroy_inode)
inode->i_mode = mode;
if (S_ISCHR(mode)) {
inode->i_fop = &def_chr_fops;
- inode->i_rdev = to_kdev_t(rdev);
+ inode->i_rdev = rdev;
} else if (S_ISBLK(mode)) {
inode->i_fop = &def_blk_fops;
- inode->i_rdev = to_kdev_t(rdev);
+ inode->i_rdev = rdev;
} else if (S_ISFIFO(mode))
inode->i_fop = &def_fifo_fops;
else if (S_ISSOCK(mode))
void izo_get_rollback_data(struct inode *inode, struct izo_rollback_data *rb)
{
rb->rb_mode = (__u32)inode->i_mode;
- rb->rb_rdev = (__u32)kdev_t_to_nr(inode->i_rdev);
+ rb->rb_rdev = (__u32)inode->i_rdev;
rb->rb_uid = (__u64)inode->i_uid;
rb->rb_gid = (__u64)inode->i_gid;
}
inode->i_data.a_ops = &isofs_symlink_aops;
} else
/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
- init_special_inode(inode, inode->i_mode,
- kdev_t_to_nr(inode->i_rdev));
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
out:
if (tmpde)
* stored in the low field, and use that.
*/
if((low & ~0xff) && high == 0) {
- inode->i_rdev = mk_kdev(low >> 8, low & 0xff);
+ inode->i_rdev = MKDEV(low >> 8, low & 0xff);
} else {
- inode->i_rdev = mk_kdev(high, low);
+ inode->i_rdev = MKDEV(high, low);
}
}
break;
int err;
struct nameidata nd;
int mtdnr;
- kdev_t dev;
+ dev_t dev;
if (!dev_name)
return ERR_PTR(-EINVAL);
dev = nd.dentry->d_inode->i_rdev;
path_release(&nd);
- if (major(dev) != MTD_BLOCK_MAJOR) {
+ if (MAJOR(dev) != MTD_BLOCK_MAJOR) {
if (!(flags & MS_VERBOSE)) /* Yes I mean this. Strangely */
printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n",
dev_name);
return ERR_PTR(-EINVAL);
}
- return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, minor(dev));
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, MINOR(dev));
}
inode->i_op = &jfs_symlink_inode_operations;
} else {
inode->i_op = &jfs_file_inode_operations;
- init_special_inode(inode, inode->i_mode,
- kdev_t_to_nr(inode->i_rdev));
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
}
}
jfs_ip->acltype = le32_to_cpu(dip->di_acltype);
if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
- ip->i_rdev = to_kdev_t(le32_to_cpu(dip->di_rdev));
+ ip->i_rdev = le32_to_cpu(dip->di_rdev);
if (S_ISDIR(ip->i_mode)) {
memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384);
dip->di_acltype = cpu_to_le32(jfs_ip->acltype);
if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
- dip->di_rdev = cpu_to_le32(kdev_t_to_nr(ip->i_rdev));
+ dip->di_rdev = cpu_to_le32(ip->i_rdev);
}
#ifdef _JFS_DEBUG_IMAP
raw_inode->i_size = inode->i_size;
raw_inode->i_time = inode->i_mtime.tv_sec;
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- raw_inode->i_zone[0] = kdev_t_to_nr(inode->i_rdev);
+ raw_inode->i_zone[0] = inode->i_rdev;
else for (i = 0; i < 9; i++)
raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
mark_buffer_dirty(bh);
raw_inode->i_atime = inode->i_atime.tv_sec;
raw_inode->i_ctime = inode->i_ctime.tv_sec;
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- raw_inode->i_zone[0] = kdev_t_to_nr(inode->i_rdev);
+ raw_inode->i_zone[0] = inode->i_rdev;
else for (i = 0; i < 10; i++)
raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
mark_buffer_dirty(bh);
case S_IFCHR:
case S_IFBLK:
/* reserve rdev for later checking */
- attr->ia_size = kdev_t_to_nr(inode->i_rdev);
+ attr->ia_size = inode->i_rdev;
attr->ia_valid |= ATTR_SIZE;
/* FALLTHROUGH */
/* Make sure the type and device matches */
nfserr = nfserr_exist;
if (inode && (type != (inode->i_mode & S_IFMT) ||
- (is_borc && kdev_t_to_nr(inode->i_rdev) != rdev)))
+ (is_borc && inode->i_rdev != rdev)))
goto out_unlock;
}
set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec );
set_sd_v2_blocks(sd_v2, inode->i_blocks );
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- set_sd_v2_rdev(sd_v2, kdev_t_to_nr(inode->i_rdev) );
+ set_sd_v2_rdev(sd_v2, inode->i_rdev);
else
set_sd_v2_generation(sd_v2, inode->i_generation);
flags = REISERFS_I(inode)->i_attrs;
set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec );
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- set_sd_v1_rdev(sd_v1, kdev_t_to_nr(inode->i_rdev) );
+ set_sd_v1_rdev(sd_v1, inode->i_rdev);
else
set_sd_v1_blocks(sd_v1, inode->i_blocks );
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
- stat->rdev = kdev_t_to_nr(inode->i_rdev);
+ stat->rdev = inode->i_rdev;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
si = SYSV_I(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- si->i_data[0] = cpu_to_fs32(sbi, kdev_t_to_nr(inode->i_rdev));
+ si->i_data[0] = cpu_to_fs32(sbi, inode->i_rdev);
for (block = 0; block < 10+1+1+1; block++)
write3byte(sbi, (u8 *)&si->i_data[block],
&raw_inode->i_data[3*block]);
strcpy(eid->ident, UDF_ID_DEVELOPER);
eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
eid->identSuffix[1] = UDF_OS_ID_LINUX;
- dsea->majorDeviceIdent = kdev_t_to_nr(inode->i_rdev) >> 8;
- dsea->minorDeviceIdent = kdev_t_to_nr(inode->i_rdev) & 0xFF;
+ dsea->majorDeviceIdent = inode->i_rdev >> 8;
+ dsea->minorDeviceIdent = inode->i_rdev & 0xFF;
mark_buffer_dirty_inode(tbh, inode);
udf_release_data(tbh);
}
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, kdev_t_to_nr(inode->i_rdev));
+ ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev);
else if (inode->i_blocks) {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
ip = LINVFS_GET_IP(vp);
if (S_ISCHR(mode) || S_ISBLK(mode))
- ip->i_rdev = to_kdev_t(rdev);
+ ip->i_rdev = rdev;
else if (S_ISDIR(mode))
validate_fields(ip);
d_instantiate(dentry, ip);
} else {
inode->i_op = &linvfs_file_inode_operations;
init_special_inode(inode, inode->i_mode,
- kdev_t_to_nr(inode->i_rdev));
+ inode->i_rdev);
}
}
inode->i_uid = ip->i_d.di_uid;
inode->i_gid = ip->i_d.di_gid;
if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
- inode->i_rdev = NODEV;
+ inode->i_rdev = 0;
} else {
xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
inode->i_rdev = XFS_DEV_TO_KDEVT(dev);
#define XFS_MKDEV(major,minor) ((xfs_dev_t)(((major)<<XFS_DEV_BITSMINOR) \
| (minor&XFS_DEV_MAXMIN)))
-#define XFS_DEV_TO_KDEVT(dev) mk_kdev(XFS_DEV_MAJOR(dev),XFS_DEV_MINOR(dev))
+#define XFS_DEV_TO_KDEVT(dev) MKDEV(XFS_DEV_MAJOR(dev),XFS_DEV_MINOR(dev))
#endif /* !__XFS_TYPES_H */
kdb_printf(
" i_mode = 0x%x i_nlink = %d i_rdev = 0x%x i_state = 0x%lx\n",
ip->i_mode, ip->i_nlink,
- kdev_t_to_nr(ip->i_rdev), ip->i_state);
+ ip->i_rdev, ip->i_state);
kdb_printf(" i_hash.nxt = 0x%p i_hash.prv = 0x%p\n",
ip->i_hash.next, ip->i_hash.prev);
unsigned int i_nlink;
uid_t i_uid;
gid_t i_gid;
- kdev_t i_rdev;
+ dev_t i_rdev;
loff_t i_size;
struct timespec i_atime;
struct timespec i_mtime;
static inline unsigned iminor(struct inode *inode)
{
- return minor(inode->i_rdev);
+ return MINOR(inode->i_rdev);
}
static inline unsigned imajor(struct inode *inode)
{
- return major(inode->i_rdev);
+ return MAJOR(inode->i_rdev);
}
struct fown_struct {