This is invoked by mount -o remount,resize=<blocks>.
See Documentation/filesystems/jfs.txt for more information.
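For example (device and mount point here are illustrative):

  mount -o remount,resize=8388608 /dev/hdb1 /jfs

grows the volume to 8388608 blocks; omitting the value grows it to the
full size of the partition.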
translations. This requires CONFIG_NLS_UTF8 to be set
in the kernel .config file.
+resize=value Resize the volume to <value> blocks. JFS only supports
+ growing a volume, not shrinking it. This option is only
+ valid during a remount, when the volume is mounted
+ read-write. The resize keyword with no value will grow
+ the volume to the full size of the partition.
+
JFS TODO list:
Plans for our near term development items
- - implement online resize for extending JFS volumes
- enhance support for logfile on dedicated partition
- get access control list functionality operational
- get extended attributes functionality operational
jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \
jfs_unicode.o jfs_dtree.o jfs_inode.o \
jfs_extent.o symlink.o jfs_metapage.o \
- jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o
+ jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o resize.o
EXTRA_CFLAGS += -D_JFS_4K
filemap_fdatawait(ipbmap->i_mapping);
ipbmap->i_state |= I_DIRTY;
- diWriteSpecial(ipbmap);
+ diWriteSpecial(ipbmap, 0);
return (0);
}
dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
>> wbitno);
- word += 1;
+ word++;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
/* determine how many bits */
nb = nwords << L2DBWORD;
+ word += nwords;
}
}
filemap_fdatawrite(ipimap->i_mapping);
filemap_fdatawait(ipimap->i_mapping);
- diWriteSpecial(ipimap);
+ diWriteSpecial(ipimap, 0);
return (0);
}
* PARAMETERS:
* sb - filesystem superblock
* inum - aggregate inode number
+ * secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES:
* new inode - success
* NULL - i/o error.
*/
-struct inode *diReadSpecial(struct super_block *sb, ino_t inum)
+struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
uint address;
return ip;
}
- /*
- * If ip->i_number >= 32 (INOSPEREXT), then read from secondary
- * aggregate inode table.
- */
-
- if (inum >= INOSPEREXT) {
- address =
- addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
- inum -= INOSPEREXT;
- ASSERT(inum < INOSPEREXT);
+ if (secondary) {
+ address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
JFS_IP(ip)->ipimap = sbi->ipaimap2;
} else {
address = AITBL_OFF >> L2PSIZE;
JFS_IP(ip)->ipimap = sbi->ipaimap;
}
+
+ ASSERT(inum < INOSPEREXT);
+
ip->i_ino = inum;
address += inum >> 3; /* 8 inodes per 4K page */
*
* PARAMETERS:
* ip - special inode
+ * secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES: none
*/
-void diWriteSpecial(struct inode *ip)
+void diWriteSpecial(struct inode *ip, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
uint address;
ino_t inum = ip->i_ino;
metapage_t *mp;
- /*
- * If ip->i_number >= 32 (INOSPEREXT), then write to secondary
- * aggregate inode table.
- */
-
- if (!(ip->i_state & I_DIRTY))
- return;
-
ip->i_state &= ~I_DIRTY;
- if (inum >= INOSPEREXT) {
- address =
- addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
- inum -= INOSPEREXT;
- ASSERT(inum < INOSPEREXT);
- } else {
+ if (secondary)
+ address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
+ else
address = AITBL_OFF >> L2PSIZE;
- }
+
+ ASSERT(inum < INOSPEREXT);
address += inum >> 3; /* 8 inodes per 4K page */
/* if AIT2 ipmap2 is bad, do not try to update it */
if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT) /* s_flag */
return;
- ip = diReadSpecial(sb, FILESYSTEM_I + INOSPEREXT);
+ ip = diReadSpecial(sb, FILESYSTEM_I, 1);
if (ip == 0) {
JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
if ((rc = readSuper(sb, &mpsuper)))
/* external references */
extern int diUpdatePMap(struct inode *ipimap, unsigned long inum,
boolean_t is_free, tblock_t * tblk);
-#ifdef _STILL_TO_PORT
-extern int diExtendFS(inode_t * ipimap, inode_t * ipbmap);
-#endif /* _STILL_TO_PORT */
-
+extern int diExtendFS(struct inode *ipimap, struct inode *ipbmap);
extern int diMount(struct inode *);
extern int diUnmount(struct inode *, int);
extern int diRead(struct inode *);
-extern struct inode *diReadSpecial(struct super_block *, ino_t);
-extern void diWriteSpecial(struct inode *);
+extern struct inode *diReadSpecial(struct super_block *, ino_t, int);
+extern void diWriteSpecial(struct inode *, int);
extern void diFreeSpecial(struct inode *);
extern int diWrite(tid_t tid, struct inode *);
#endif /* _H_JFS_IMAP */
u32 logdev; /* 2: external log device */
uint aggregate; /* volume identifier in log record */
pxd_t logpxd; /* 8: pxd describing log */
+ pxd_t fsckpxd; /* 8: pxd describing fsck wkspc */
pxd_t ait2; /* 8: pxd describing AIT copy */
char uuid[16]; /* 16: 128-bit uuid for volume */
char loguuid[16]; /* 16: 128-bit uuid for log */
static int lmNextPage(log_t * log);
static int lmLogFileSystem(log_t * log, char *uuid, int activate);
-static int lmLogInit(log_t * log);
-static int lmLogShutdown(log_t * log);
static int lbmLogInit(log_t * log);
static void lbmLogShutdown(log_t * log);
* by setting syncbarrier flag.
*/
if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) {
- log->syncbarrier = 1;
+ set_bit(log_SYNCBARRIER, &log->flag);
jFYI(1, ("log barrier on: lsn=0x%x syncpt=0x%x\n", lsn,
log->syncpt));
}
if (!(log = kmalloc(sizeof(log_t), GFP_KERNEL)))
return ENOMEM;
memset(log, 0, sizeof(log_t));
+ init_waitqueue_head(&log->syncwait);
log->sb = sb; /* This should be a list */
* file system to log have 1-to-1 relationship;
*/
- log->flag = JFS_INLINELOG;
+ set_bit(log_INLINELOG, &log->flag);
log->bdev = sb->s_bdev;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
*
* serialization: single first open thread
*/
-static int lmLogInit(log_t * log)
+int lmLogInit(log_t * log)
{
int rc = 0;
lrd_t lrd;
*/
- if (!(log->flag & JFS_INLINELOG))
+ if (!test_bit(log_INLINELOG, &log->flag))
log->l2bsize = 12; /* XXX kludge alert XXX */
if ((rc = lbmRead(log, 1, &bpsuper)))
goto errout10;
}
/* initialize log inode from log superblock */
- if (log->flag & JFS_INLINELOG) {
+ if (test_bit(log_INLINELOG, &log->flag)) {
if (log->size != le32_to_cpu(logsuper->size)) {
rc = EINVAL;
goto errout20;
log, (unsigned long long) log->base, log->size));
}
- log->flag |= JFS_GROUPCOMMIT;
-/*
- log->flag |= JFS_LAZYCOMMIT;
-*/
log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);
log->syncpt = lsn;
log->sync = log->syncpt;
log->nextsync = LOGSYNC_DELTA(log->logsize);
- init_waitqueue_head(&log->syncwait);
jFYI(1, ("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x\n",
log->lsn, log->syncpt, log->sync));
jFYI(1, ("lmLogClose: log:0x%p\n", log));
- if (!(log->flag & JFS_INLINELOG))
+ if (!test_bit(log_INLINELOG, &log->flag))
goto externalLog;
/*
*
* serialization: single last close thread
*/
-static int lmLogShutdown(log_t * log)
+int lmLogShutdown(log_t * log)
{
int rc;
lrd_t lrd;
return rc;
}
-
-/*
- * lmLogQuiesce()
- */
-int lmLogQuiesce(log_t * log)
-{
- int rc;
-
- rc = lmLogShutdown(log);
-
- return rc;
-}
-
-
-/*
- * lmLogResume()
- */
-int lmLogResume(log_t * log, struct super_block *sb)
-{
- struct jfs_sb_info *sbi = JFS_SBI(sb);
- int rc;
-
- log->base = addressPXD(&sbi->logpxd);
- log->size =
- (lengthPXD(&sbi->logpxd) << sb->s_blocksize_bits) >> L2LOGPSIZE;
- rc = lmLogInit(log);
-
- return rc;
-}
-
-
/*
* log buffer manager (lbm)
* ------------------------
return 0;
}
-
-#ifdef _STILL_TO_PORT
/*
* NAME: lmLogFormat()/jfs_logform()
*
- * FUNCTION: format file system log (ref. jfs_logform()).
+ * FUNCTION: format file system log
*
* PARAMETERS:
- * log - log inode (with common mount inode base);
- * logAddress - start address of log space in FS block;
+ * log - volume log
+ * logAddress - start address of log space in FS block
* logSize - length of log space in FS block;
*
- * RETURN: 0 - success
- * -1 - i/o error
+ * RETURN: 0 - success
+ * -EIO - i/o error
+ *
+ * XXX: We're synchronously writing one page at a time. This needs to
+ * be improved by writing multiple pages at once.
*/
-int lmLogFormat(inode_t * ipmnt, s64 logAddress, int logSize)
+int lmLogFormat(log_t *log, s64 logAddress, int logSize)
{
- int rc = 0;
- cbuf_t *bp;
+ int rc = -EIO;
+ struct jfs_sb_info *sbi = JFS_SBI(log->sb);
logsuper_t *logsuper;
logpage_t *lp;
int lspn; /* log sequence page number */
struct lrd *lrd_ptr;
- int npbperpage, npages;
+ int npages = 0;
+ lbuf_t *bp;
jFYI(0, ("lmLogFormat: logAddress:%Ld logSize:%d\n",
- logAddress, logSize));
+ (long long) logAddress, logSize));
- /* allocate a JFS buffer */
- bp = rawAllocate();
+ /* allocate a log buffer */
+ bp = lbmAllocate(log, 1);
- /* map the logical block address to physical block address */
- bp->cm_blkno = logAddress << ipmnt->i_l2bfactor;
-
- npbperpage = LOGPSIZE >> ipmnt->i_l2pbsize;
- npages = logSize / (LOGPSIZE >> ipmnt->i_l2bsize);
+ npages = logSize >> sbi->l2nbperpage;
/*
* log space:
/*
* init log superblock: log page 1
*/
- logsuper = (logsuper_t *) bp->cm_cdata;
+ logsuper = (logsuper_t *) bp->l_ldata;
logsuper->magic = cpu_to_le32(LOGMAGIC);
logsuper->version = cpu_to_le32(LOGVERSION);
logsuper->state = cpu_to_le32(LOGREDONE);
- logsuper->flag = cpu_to_le32(ipmnt->i_mntflag); /* ? */
+ logsuper->flag = cpu_to_le32(sbi->mntflag); /* ? */
logsuper->size = cpu_to_le32(npages);
- logsuper->bsize = cpu_to_le32(ipmnt->i_bsize);
- logsuper->l2bsize = cpu_to_le32(ipmnt->i_l2bsize);
- logsuper->end =
- cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);
+ logsuper->bsize = cpu_to_le32(sbi->bsize);
+ logsuper->l2bsize = cpu_to_le32(sbi->l2bsize);
+ logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);
- bp->cm_blkno += npbperpage;
- rawWrite(ipmnt, bp, 0);
+ bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
+ bp->l_blkno = logAddress + sbi->nbperpage;
+ lbmStartIO(bp);
+ if ((rc = lbmIOWait(bp, 0)))
+ goto exit;
/*
* init pages 2 to npages-1 as log data pages:
* a circular file for the log records;
* lpsn grows by 1 monotonically as each log page is written
* to the circular file of the log;
- * Since the AIX DUMMY log record is dropped for this XJFS,
* and setLogpage() will not reset the page number even if
* the eor is equal to LOGPHDRSIZE. In order for binary search
* still work in find log end process, we have to simulate the
* the succeeding log pages will have ascending order of
* the lspn starting from 0, ... (N-2)
*/
- lp = (logpage_t *) bp->cm_cdata;
-
+ lp = (logpage_t *) bp->l_ldata;
/*
* initialize 1st log page to be written: lpsn = N - 1,
* write a SYNCPT log record is written to this page
lrd_ptr->length = 0;
lrd_ptr->log.syncpt.sync = 0;
- bp->cm_blkno += npbperpage;
- rawWrite(ipmnt, bp, 0);
+ bp->l_blkno += sbi->nbperpage;
+ bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
+ lbmStartIO(bp);
+ if ((rc = lbmIOWait(bp, 0)))
+ goto exit;
/*
* initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
lp->h.page = lp->t.page = cpu_to_le32(lspn);
lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
- bp->cm_blkno += npbperpage;
- rawWrite(ipmnt, bp, 0);
+ bp->l_blkno += sbi->nbperpage;
+ bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
+ lbmStartIO(bp);
+ if ((rc = lbmIOWait(bp, 0)))
+ goto exit;
}
+ rc = 0;
+exit:
/*
* finalize log
*/
/* release the buffer */
- rawRelease(bp);
+ lbmFree(bp);
return rc;
}
-#endif /* _STILL_TO_PORT */
-
#ifdef CONFIG_JFS_STATISTICS
int jfs_lmstats_read(char *buffer, char **start, off_t offset, int length,
int size; /* 4: log size in log page (in page) */
int l2bsize; /* 4: log2 of bsize */
- uint flag; /* 4: flag */
- uint state; /* 4: state */
+ long flag; /* 4: flag */
struct lbuf *lbuf_free; /* 4: free lbufs */
wait_queue_head_t free_wait; /* 4: */
/* syncpt */
int nextsync; /* 4: bytes to write before next syncpt */
int active; /* 4: */
- int syncbarrier; /* 4: */
wait_queue_head_t syncwait; /* 4: */
/* commit */
char uuid[16]; /* 16: 128-bit uuid of log device */
} log_t;
+/*
+ * Log flag
+ */
+#define log_INLINELOG 1
+#define log_SYNCBARRIER 2
+#define log_QUIESCE 3
+
/*
* group commit flag
*/
extern void lmLogWait(log_t * log);
extern int lmLogClose(struct super_block *sb, log_t * log);
extern int lmLogSync(log_t * log, int nosyncwait);
-extern int lmLogQuiesce(log_t * log);
-extern int lmLogResume(log_t * log, struct super_block *sb);
-extern int lmLogFormat(struct super_block *sb, s64 logAddress, int logSize);
+extern int lmLogShutdown(log_t * log);
+extern int lmLogInit(log_t * log);
+extern int lmLogFormat(log_t *log, s64 logAddress, int logSize);
#endif /* _H_JFS_LOGMGR */
goto errout20;
}
- ipaimap = diReadSpecial(sb, AGGREGATE_I);
+ ipaimap = diReadSpecial(sb, AGGREGATE_I, 0);
if (ipaimap == NULL) {
jERROR(1, ("jfs_mount: Faild to read AGGREGATE_I\n"));
rc = EIO;
/*
* open aggregate block allocation map
*/
- ipbmap = diReadSpecial(sb, BMAP_I);
+ ipbmap = diReadSpecial(sb, BMAP_I, 0);
if (ipbmap == NULL) {
rc = EIO;
goto errout22;
* table.
*/
if ((sbi->mntflag & JFS_BAD_SAIT) == 0) {
- ipaimap2 = diReadSpecial(sb, AGGREGATE_I + INOSPEREXT);
+ ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1);
if (ipaimap2 == 0) {
jERROR(1,
("jfs_mount: Faild to read AGGREGATE_I\n"));
/*
* open fileset inode allocation map (aka fileset inode)
*/
- ipimap = diReadSpecial(sb, FILESYSTEM_I);
+ ipimap = diReadSpecial(sb, FILESYSTEM_I, 0);
if (ipimap == NULL) {
jERROR(1, ("jfs_mount: Failed to read FILESYSTEM_I\n"));
/* open fileset secondary inode allocation map */
memcpy(sbi->uuid, j_sb->s_uuid, sizeof(sbi->uuid));
memcpy(sbi->loguuid, j_sb->s_loguuid, sizeof(sbi->uuid));
}
+ sbi->fsckpxd = j_sb->s_fsckpxd;
sbi->ait2 = j_sb->s_ait2;
out:
TXN_LOCK();
retry:
- if (flag != COMMIT_FORCE) {
+ if (!(flag & COMMIT_FORCE)) {
/*
* synchronize with logsync barrier
*/
- if (log->syncbarrier) {
+ if (test_bit(log_SYNCBARRIER, &log->flag) ||
+ test_bit(log_QUIESCE, &log->flag)) {
TXN_SLEEP(&log->syncwait);
goto retry;
}
if (flag == 0) {
/*
* Don't begin transaction if we're getting starved for tlocks
- * unless COMMIT_FORCE (imap changes) or COMMIT_INODE (which
- * may ultimately free tlocks)
+ * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
+ * free tlocks)
*/
if (TlocksLow) {
TXN_SLEEP(&TxAnchor.lowlockwait);
/*
* synchronize with logsync barrier
*/
- if (log->syncbarrier) {
+ if (test_bit(log_SYNCBARRIER, &log->flag) ||
+ test_bit(log_QUIESCE, &log->flag)) {
TXN_SLEEP(&log->syncwait);
goto retry;
}
/*
* synchronize with logsync barrier
*/
- if (log->syncbarrier && log->active == 0) {
+ if (test_bit(log_SYNCBARRIER, &log->flag) && log->active == 0) {
/* forward log syncpt */
/* lmSync(log); */
jFYI(1, (" log barrier off: 0x%x\n", log->lsn));
/* enable new transactions start */
- log->syncbarrier = 0;
+ clear_bit(log_SYNCBARRIER, &log->flag);
/* wakeup all waitors for logsync barrier */
TXN_WAKEUP(&log->syncwait);
*
* FUNCTION: Initiates pageout of pages modified by tid in journalled
* objects and frees their lockwords.
- *
- * PARAMETER:
- * flag -
- *
- * RETURN: Errors from subroutines.
*/
-static void txUnlock(tblock_t * tblk, int flag)
+static void txUnlock(tblock_t * tblk)
{
tlock_t *tlck;
linelock_t *linelock;
lid_t lid, next, llid, k;
metapage_t *mp;
log_t *log;
- int force;
int difft, diffp;
jFYI(1, ("txUnlock: tblk = 0x%p\n", tblk));
log = (log_t *) JFS_SBI(tblk->sb)->log;
- force = flag & COMMIT_FLUSH;
- if (log->syncbarrier)
- force |= COMMIT_FORCE;
/*
* mark page under tlock homeok (its log has been written):
- * if caller has specified FORCE (e.g., iRecycle()), or
- * if syncwait for the log is set (i.e., the log sync point
- * has fallen behind), or
- * if syncpt is set for the page, or
- * if the page is new, initiate pageout;
- * otherwise, leave the page in memory.
*/
for (lid = tblk->next; lid; lid = next) {
tlck = lid_to_tlock(lid);
txRelease(tblk);
if ((tblk->flag & tblkGC_LAZY) == 0)
- txUnlock(tblk, flag);
+ txUnlock(tblk);
/*
spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
if (tblk->flag & tblkGC_LAZY) {
- txUnlock(tblk, 0);
+ txUnlock(tblk);
tblk->flag &= ~tblkGC_LAZY;
txEnd(tblk - TxBlock); /* Convert back to tid */
}
release_metapage(mp);
}
+/*
+ * txQuiesce
+ *
+ * Block all new transactions and push anonymous transactions to
+ * completion
+ *
+ * This does almost the same thing as jfs_sync below. We don't
+ * worry about deadlocking when TlocksLow is set, since we would
+ * expect jfs_sync to get us out of that jam.
+ */
+void txQuiesce(struct super_block *sb)
+{
+ struct inode *ip;
+ struct jfs_inode_info *jfs_ip;
+ log_t *log = JFS_SBI(sb)->log;
+ int rc;
+ tid_t tid;
+
+ set_bit(log_QUIESCE, &log->flag);
+
+ TXN_LOCK();
+restart:
+ while (!list_empty(&TxAnchor.anon_list)) {
+ jfs_ip = list_entry(TxAnchor.anon_list.next,
+ struct jfs_inode_info,
+ anon_inode_list);
+ ip = &jfs_ip->vfs_inode;
+
+ /*
+ * inode will be removed from anonymous list
+ * when it is committed
+ */
+ TXN_UNLOCK();
+ tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
+ down(&jfs_ip->commit_sem);
+ rc = txCommit(tid, 1, &ip, 0);
+ txEnd(tid);
+ up(&jfs_ip->commit_sem);
+ /*
+ * Just to be safe. I don't know how
+ * long we can run without blocking
+ */
+ cond_resched();
+ TXN_LOCK();
+ }
+
+ /*
+ * If jfs_sync is running in parallel, there could be some inodes
+ * on anon_list2. Let's check.
+ */
+ if (!list_empty(&TxAnchor.anon_list2)) {
+ list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
+ INIT_LIST_HEAD(&TxAnchor.anon_list2);
+ goto restart;
+ }
+ TXN_UNLOCK();
+}
+
+/*
+ * txResume()
+ *
+ * Allows transactions to start again following txQuiesce
+ */
+void txResume(struct super_block *sb)
+{
+ log_t *log = JFS_SBI(sb)->log;
+
+ clear_bit(log_QUIESCE, &log->flag);
+ TXN_WAKEUP(&log->syncwait);
+}
+
/*
* jfs_sync(void)
*
* when it is committed
*/
TXN_UNLOCK();
- tid = txBegin(ip->i_sb, COMMIT_INODE);
+ tid = txBegin(ip->i_sb,
+ COMMIT_INODE | COMMIT_FORCE);
rc = txCommit(tid, 1, &ip, 0);
txEnd(tid);
up(&jfs_ip->commit_sem);
* so let's not block here. Save it to
* put back on the anon_list.
*/
- if (TxAnchor.anon_list.next !=
- &jfs_ip->anon_inode_list)
- continue;
/* Take off anon_list */
list_del(&jfs_ip->anon_inode_list);
extern int lmLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
+extern void txQuiesce(struct super_block *sb);
+
+extern void txResume(struct super_block *sb);
#endif /* _H_JFS_TXNMGR */
}
-#ifdef _STILL_TO_PORT
/*
* xtAppend()
*
* return:
*/
int xtAppend(tid_t tid, /* transaction id */
- struct inode *ip, int xflag, s64 xoff, s32 maxblocks, /* @GD1 */
+ struct inode *ip, int xflag, s64 xoff, s32 maxblocks,
s32 * xlenp, /* (in/out) */
s64 * xaddrp, /* (in/out) */
int flag)
pxdlist.maxnpxd = pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
nblocks = JFS_SBI(ip->i_sb)->nbperpage;
- for (; nsplit > 0; nsplit--, pxd++, xaddr += nblocks, maxblocks -= nblocks) { /* @GD1 */
+ for (; nsplit > 0; nsplit--, pxd++, xaddr += nblocks, maxblocks -= nblocks) {
if ((rc = dbAllocBottomUp(ip, xaddr, (s64) nblocks)) == 0) {
PXDaddress(pxd, xaddr);
PXDlength(pxd, nblocks);
goto out;
}
- xlen = min(xlen, maxblocks); /* @GD1 */
+ xlen = min(xlen, maxblocks);
/*
* allocate data extent requested
cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index, xtlck->lwm.offset) : index;
+ (xtlck->lwm.offset) ? min(index, (int) xtlck->lwm.offset) : index;
xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
xtlck->lwm.offset;
return rc;
}
-
+#ifdef _STILL_TO_PORT
/* - TBD for defragmentaion/reorganization -
*
--- /dev/null
+/*
+ * Copyright (c) International Business Machines Corp., 2000-2002
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_metapage.h"
+#include "jfs_dinode.h"
+#include "jfs_imap.h"
+#include "jfs_dmap.h"
+#include "jfs_superblock.h"
+#include "jfs_txnmgr.h"
+#include "jfs_debug.h"
+
+#define BITSPERPAGE (PSIZE << 3)
+#define L2MEGABYTE 20
+#define MEGABYTE (1 << L2MEGABYTE)
+#define MEGABYTE32 (MEGABYTE << 5)
+
+/* convert block number to bmap file page number */
+#define BLKTODMAPN(b)\
+ (((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
+
+/*
+ * jfs_extendfs()
+ *
+ * function: extend file system;
+ *
+ * |-------------------------------|----------|----------|
+ * file system space fsck inline log
+ * workspace space
+ *
+ * input:
+ * new LVSize: in LV blocks (required)
+ * new LogSize: in LV blocks (optional)
+ * new FSSize: in LV blocks (optional)
+ *
+ * new configuration:
+ * 1. set new LogSize as specified or default from new LVSize;
+ * 2. compute new FSCKSize from new LVSize;
+ * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
+ * assert(new FSSize >= old FSSize),
+ * i.e., file system must not be shrinked;
+ */
+int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
+{
+ int rc = 0;
+ struct jfs_sb_info *sbi = JFS_SBI(sb);
+ struct inode *ipbmap = sbi->ipbmap;
+ struct inode *ipbmap2;
+ struct inode *ipimap = sbi->ipimap;
+ log_t *log = sbi->log;
+ bmap_t *bmp = sbi->bmap;
+ s64 newLogAddress, newFSCKAddress;
+ int newFSCKSize;
+ s64 newMapSize = 0, mapSize;
+ s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
+ s64 oldLVSize;
+ s64 newFSSize;
+ s64 VolumeSize;
+ int newNpages = 0, nPages, newPage, xlen, t32;
+ int tid;
+ int log_formatted = 0;
+ struct inode *iplist[1];
+ struct jfs_superblock *j_sb, *j_sb2;
+ metapage_t *sbp, *sb2p;
+ uint old_agsize;
+ struct buffer_head *bh;
+
+ /* If the volume hasn't grown, get out now */
+
+ if (sbi->mntflag & JFS_INLINELOG)
+ oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
+ else
+ oldLVSize = addressPXD(&sbi->fsckpxd) +
+ lengthPXD(&sbi->fsckpxd);
+
+ if (oldLVSize >= newLVSize) {
+ printk(KERN_WARNING
+ "jfs_extendfs: volume hasn't grown, returning\n");
+ goto out;
+ }
+
+ VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+
+ if (VolumeSize) {
+ if (newLVSize > VolumeSize) {
+ printk(KERN_WARNING "jfs_extendfs: invalid size\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* check the device */
+ bh = sb_bread(sb, newLVSize - 1);
+ if (!bh) {
+ printk(KERN_WARNING "jfs_extendfs: invalid size\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ bforget(bh);
+ }
+
+ /* Can't extend write-protected drive */
+
+ if (isReadOnly(ipbmap)) {
+ printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
+ rc = -EROFS;
+ goto out;
+ }
+
+ /*
+ * reconfigure LV spaces
+ * ---------------------
+ *
+ * validate new size, or, if not specified, determine new size
+ */
+
+ /*
+ * reconfigure inline log space:
+ */
+ if ((sbi->mntflag & JFS_INLINELOG)) {
+ if (newLogSize == 0) {
+ /*
+ * no size specified: default to 1/256 of aggregate
+ * size; rounded up to a megabyte boundary;
+ */
+ newLogSize = newLVSize >> 8;
+ t32 = (1 << (20 - sbi->l2bsize)) - 1;
+ newLogSize = (newLogSize + t32) & ~t32;
+ newLogSize =
+ min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
+ } else {
+ /*
+ * convert the newLogSize to fs blocks.
+ *
+ * Since this is given in megabytes, it will always be
+ * an even number of pages.
+ */
+ newLogSize = (newLogSize * MEGABYTE) >> sbi->l2bsize;
+ }
+
+ } else
+ newLogSize = 0;
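+
+ /*
+ * Worked example (illustrative, assuming a 4K block size, so
+ * l2bsize = 12): for newLVSize = 1048576 blocks (4GB) with no log
+ * size specified, newLogSize = 1048576 >> 8 = 4096 blocks, which is
+ * already on a megabyte boundary and below the 32MB cap, i.e. a
+ * 16MB inline log.
+ */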
+
+ newLogAddress = newLVSize - newLogSize;
+
+ /*
+ * reconfigure fsck work space:
+ *
+ * configure it to the end of the logical volume regardless of
+ * whether file system extends to the end of the aggregate;
+ * Need enough 4k pages to cover:
+ * - 1 bit per block in aggregate rounded up to BPERDMAP boundary
+ * - 1 extra page to handle control page and intermediate level pages
+ * - 50 extra pages for the chkdsk service log
+ */
+ t64 = ((newLVSize - newLogSize + BPERDMAP - 1) >> L2BPERDMAP)
+ << L2BPERDMAP;
+ t32 = ((t64 + (BITSPERPAGE - 1)) / BITSPERPAGE) + 1 + 50;
+ newFSCKSize = t32 << sbi->l2nbperpage;
+ newFSCKAddress = newLogAddress - newFSCKSize;
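+ /*
+ * Continuing the illustrative 4GB example (4K blocks, so
+ * l2nbperpage = 0): t64 rounds 1048576 - 4096 up to a dmap
+ * boundary, giving 1048576; t32 = 1048576/32768 + 1 + 50 = 83
+ * pages, so the fsck workspace occupies 83 blocks just below
+ * the inline log.
+ */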
+
+ /*
+ * compute new file system space;
+ */
+ newFSSize = newLVSize - newLogSize - newFSCKSize;
+
+ /* file system cannot be shrunk */
+ if (newFSSize < bmp->db_mapsize) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * If we're expanding enough that the inline log does not overlap
+ * the old one, we can format the new log before we quiesce the
+ * filesystem.
+ */
+ if ((sbi->mntflag & JFS_INLINELOG) && (newLogAddress > oldLVSize)) {
+ if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
+ goto out;
+ log_formatted = 1;
+ }
+ /*
+ * quiesce file system
+ *
+ * (prepare to move the inline log and to prevent map update)
+ *
+ * block any new transactions and wait for completion of
+ * all wip transactions and flush modified pages s.t.
+ * on-disk file system is in consistent state and
+ * log is not required for recovery.
+ */
+ txQuiesce(sb);
+
+ if (sbi->mntflag & JFS_INLINELOG) {
+ /*
+ * deactivate old inline log
+ */
+ lmLogShutdown(log);
+
+ /*
+ * mark on-disk super block for fs in transition;
+ *
+ * update on-disk superblock for the new space configuration
+ * of inline log space and fsck work space descriptors:
+ * N.B. FS descriptor is NOT updated;
+ *
+ * crash recovery:
+ * logredo(): if FM_EXTENDFS, return to fsck() for cleanup;
+ * fsck(): if FM_EXTENDFS, reformat inline log and fsck
+ * workspace from superblock inline log descriptor and fsck
+ * workspace descriptor;
+ */
+
+ /* read in superblock */
+ if ((rc = readSuper(sb, &sbp)))
+ goto error_out;
+ j_sb = (struct jfs_superblock *) (sbp->data);
+
+ /* mark extendfs() in progress */
+ j_sb->s_state |= cpu_to_le32(FM_EXTENDFS);
+ j_sb->s_xsize = cpu_to_le64(newFSSize);
+ PXDaddress(&j_sb->s_xfsckpxd, newFSCKAddress);
+ PXDlength(&j_sb->s_xfsckpxd, newFSCKSize);
+ PXDaddress(&j_sb->s_xlogpxd, newLogAddress);
+ PXDlength(&j_sb->s_xlogpxd, newLogSize);
+
+ /* synchronously update superblock */
+ flush_metapage(sbp);
+
+ /*
+ * format new inline log synchronously;
+ *
+ * crash recovery: if log move in progress,
+ * reformat log and exit success;
+ */
+ if (!log_formatted)
+ if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
+ goto error_out;
+
+ /*
+ * activate new log
+ */
+ log->base = newLogAddress;
+ log->size = newLogSize >> (L2LOGPSIZE - sb->s_blocksize_bits);
+ if ((rc = lmLogInit(log)))
+ goto error_out;
+ }
+
+ /*
+ * extend block allocation map
+ * ---------------------------
+ *
+ * extendfs() for new extension, retry after crash recovery;
+ *
+ * note: both logredo() and fsck() rebuild map from
+ * the bitmap and configuration parameter from superblock
+ * (disregarding all other control information in the map);
+ *
+ * superblock:
+ * s_size: aggregate size in physical blocks;
+ */
+ /*
+ * compute the new block allocation map configuration
+ *
+ * map dinode:
+ * di_size: map file size in byte;
+ * di_nblocks: number of blocks allocated for map file;
+ * di_mapsize: number of blocks in aggregate (covered by map);
+ * map control page:
+ * db_mapsize: number of blocks in aggregate (covered by map);
+ */
+ newMapSize = newFSSize;
+ /* number of data pages of new bmap file:
+ * roundup new size to full dmap page boundary and
+ * add 1 extra dmap page for next extendfs()
+ */
+ t64 = (newMapSize - 1) + BPERDMAP;
+ newNpages = BLKTODMAPN(t64) + 1;
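+ /*
+ * In the illustrative 4GB example, newMapSize = 1048576 - 4096 - 83
+ * = 1044397 blocks, t64 = 1044396 + 8192 = 1052588, and
+ * BLKTODMAPN(t64) = 128 + 0 + 0 + 3 + 1 = 132, so newNpages = 133.
+ */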
+
+ /*
+ * extend map from current map (WITHOUT growing mapfile)
+ *
+ * map new extension with unmapped part of the last partial
+ * dmap page, if applicable, and extra page(s) allocated
+ * at end of bmap by mkfs() or previous extendfs();
+ */
+ extendBmap:
+ /* compute number of blocks requested to extend */
+ mapSize = bmp->db_mapsize;
+ XAddress = mapSize; /* eXtension Address */
+ XSize = newMapSize - mapSize; /* eXtension Size */
+ old_agsize = bmp->db_agsize; /* We need to know if this changes */
+
+ /* compute number of blocks that can be extended by current mapfile */
+ t64 = dbMapFileSizeToMapSize(ipbmap);
+ if (mapSize > t64) {
+ printk(KERN_ERR
+ "jfs_extendfs: mapSize (0x%llx) > t64 (0x%llx)\n",
+ mapSize, t64);
+ rc = -EIO;
+ goto error_out;
+ }
+ nblocks = min(t64 - mapSize, XSize);
+
+ /*
+ * update map pages for new extension:
+ *
+ * update/init dmap and bubble up the control hierarchy
+ * incrementally fold up dmaps into upper levels;
+ * update bmap control page;
+ */
+ if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
+ goto error_out;
+ /*
+ * the map now has extended to cover additional nblocks:
+ * dn_mapsize = oldMapsize + nblocks;
+ */
+ /* ipbmap->i_mapsize += nblocks; */
+ XSize -= nblocks;
+
+ /*
+ * grow map file to cover remaining extension
+ * and/or one extra dmap page for next extendfs();
+ *
+ * allocate new map pages and its backing blocks, and
+ * update map file xtree
+ */
+ /* compute number of data pages of current bmap file */
+ nPages = ipbmap->i_size >> L2PSIZE;
+
+ /* need to grow map file ? */
+ if (nPages == newNpages)
+ goto updateImap;
+
+ /*
+ * grow bmap file for the new map pages required:
+ *
+ * allocate growth at the start of newly extended region;
+ * bmap file only grows sequentially, i.e., both data pages
+ * and possibly xtree index pages may grow in append mode,
+ * s.t. logredo() can reconstruct pre-extension state
+ * by washing away bmap file of pages outside s_size boundary;
+ */
+ /*
+ * journal map file growth as if a regular file growth:
+ * (note: bmap is created with di_mode = IFJOURNAL|IFREG);
+ *
+ * journaling of bmap file growth is not required since
+ * logredo() do/can not use log records of bmap file growth
+ * but it provides careful write semantics, pmap update, etc.;
+ */
+ /* synchronous write of data pages: bmap data pages are
+ * cached in meta-data cache, and not written out
+ * by txCommit();
+ */
+ filemap_fdatawait(ipbmap->i_mapping);
+ filemap_fdatawrite(ipbmap->i_mapping);
+ filemap_fdatawait(ipbmap->i_mapping);
+ diWriteSpecial(ipbmap, 0);
+
+ newPage = nPages; /* first new page number */
+ xoff = newPage << sbi->l2nbperpage;
+ xlen = (newNpages - nPages) << sbi->l2nbperpage;
+ xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1);
+ xaddr = XAddress;
+
+ tid = txBegin(sb, COMMIT_FORCE);
+
+ if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) {
+ txEnd(tid);
+ goto error_out;
+ }
+ /* update bmap file size */
+ ipbmap->i_size += xlen << sbi->l2bsize;
+ ipbmap->i_blocks += LBLK2PBLK(sb, xlen);
+
+ iplist[0] = ipbmap;
+ rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
+
+ txEnd(tid);
+
+ if (rc)
+ goto error_out;
+
+ /*
+ * map file has been grown now to cover extension to further out;
+ * di_size = new map file size;
+ *
+ * if huge extension, the previous extension based on previous
+ * map file size may not have been sufficient to cover whole extension
+ * (it could have been used up for new map pages),
+ * but the newly grown map file now covers lot bigger new free space
+ * available for further extension of map;
+ */
+ /* any more blocks to extend ? */
+ if (XSize)
+ goto extendBmap;
+
+ /* finalize bmap */
+ dbFinalizeBmap(ipbmap);
+
+ /*
+ * update inode allocation map
+ * ---------------------------
+ *
+ * move iag lists from old to new iag;
+ * agstart field is not updated for logredo() to reconstruct
+ * iag lists if system crash occurs.
+ * (computation of ag number from agstart based on agsize
+ * will correctly identify the new ag);
+ */
+ updateImap:
+ /* if new AG size the same as old AG size, done! */
+ if (bmp->db_agsize != old_agsize) {
+ if ((rc = diExtendFS(ipimap, ipbmap)))
+ goto error_out;
+
+ /* finalize imap */
+ if ((rc = diSync(ipimap)))
+ goto error_out;
+ }
+
+ /*
+ * finalize
+ * --------
+ *
+ * extension is committed when on-disk super block is
+ * updated with new descriptors: logredo will recover
+ * crash before it to pre-extension state;
+ */
+
+ /* sync log to skip log replay of bmap file growth transaction; */
+ /* lmLogSync(log, 1); */
+
+ /*
+ * synchronous write bmap global control page;
+ * for crash before completion of write
+ * logredo() will recover to pre-extendfs state;
+ * for crash after completion of write,
+ * logredo() will recover post-extendfs state;
+ */
+ if ((rc = dbSync(ipbmap)))
+ goto error_out;
+
+ /*
+ * copy primary bmap inode to secondary bmap inode
+ */
+
+ ipbmap2 = diReadSpecial(sb, BMAP_I, 1);
+ if (ipbmap2 == NULL) {
+ printk(KERN_ERR "jfs_extendfs: diReadSpecial(bmap) failed\n");
+ rc = -EIO;
+ goto error_out;
+ }
+ memcpy(&JFS_IP(ipbmap2)->i_xtroot, &JFS_IP(ipbmap)->i_xtroot, 288);
+ ipbmap2->i_size = ipbmap->i_size;
+ ipbmap2->i_blocks = ipbmap->i_blocks;
+
+ diWriteSpecial(ipbmap2, 1);
+ diFreeSpecial(ipbmap2);
+ /*
+ * update superblock
+ */
+ if ((rc = readSuper(sb, &sbp)))
+ goto error_out;
+ j_sb = (struct jfs_superblock *) (sbp->data);
+
+ /* mark extendfs() completion */
+ j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS);
+ j_sb->s_size = cpu_to_le64(bmp->db_mapsize <<
+ le16_to_cpu(j_sb->s_l2bfactor));
+ j_sb->s_agsize = cpu_to_le32(bmp->db_agsize);
+
+ /* update inline log space descriptor */
+ if (sbi->mntflag & JFS_INLINELOG) {
+ PXDaddress(&(j_sb->s_logpxd), newLogAddress);
+ PXDlength(&(j_sb->s_logpxd), newLogSize);
+ }
+
+ /* record log's mount serial number */
+ j_sb->s_logserial = cpu_to_le32(log->serial);
+
+ /* update fsck work space descriptor */
+ PXDaddress(&(j_sb->s_fsckpxd), newFSCKAddress);
+ PXDlength(&(j_sb->s_fsckpxd), newFSCKSize);
+ j_sb->s_fscklog = 1;
+ /* sb->s_fsckloglen remains the same */
+
+ /* Update secondary superblock */
+ sb2p = read_metapage(sbi->direct_inode,
+ SUPER2_OFF >> sb->s_blocksize_bits, PSIZE, 1);
+
+ if (sb2p) {
+ j_sb2 = (struct jfs_superblock *) (sb2p->data);
+ memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
+ flush_metapage(sb2p);
+ }
+
+ /* write primary superblock */
+ flush_metapage(sbp);
+
+ goto resume;
+
+ error_out:
+ updateSuper(sb, FM_DIRTY);
+
+ resume:
+ /*
+ * resume file system transactions
+ */
+ txResume(sb);
+
+ out:
+ return rc;
+}
extern void jfs_write_inode(struct inode *inode, int wait);
extern struct dentry *jfs_get_parent(struct dentry *dentry);
+extern int jfs_extendfs(struct super_block *, s64, int);
-#ifdef PROC_FS_JFS /* see jfs_debug.h */
+#ifdef PROC_FS_JFS /* see jfs_debug.h */
extern void jfs_proc_init(void);
extern void jfs_proc_clean(void);
#endif
*/
maxinodes = min((s64) atomic_read(&imap->im_numinos) +
((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
- << L2INOSPEREXT), (s64)0xffffffffLL);
+ << L2INOSPEREXT), (s64) 0xffffffffLL);
buf->f_files = maxinodes;
buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
atomic_read(&imap->im_numfree));
kfree(sbi);
}
-static int parse_options (char * options, struct jfs_sb_info *sbi)
+static int parse_options(char *options, struct super_block *sb, s64 *newLVSize)
{
void *nls_map = NULL;
- char * this_char;
- char * value;
+ char *this_char;
+ char *value;
+ struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+ *newLVSize = 0;
if (!options)
return 1;
- while ((this_char = strsep (&options, ",")) != NULL) {
+ while ((this_char = strsep(&options, ",")) != NULL) {
if (!*this_char)
continue;
- if ((value = strchr (this_char, '=')) != NULL)
+ if ((value = strchr(this_char, '=')) != NULL)
*value++ = 0;
- if (!strcmp (this_char, "iocharset")) {
+ if (!strcmp(this_char, "iocharset")) {
if (!value || !*value)
goto needs_arg;
if (nls_map) /* specified iocharset twice! */
printk(KERN_ERR "JFS: charset not found\n");
goto cleanup;
}
- /* Silently ignore the quota options */
- } else if (!strcmp (this_char, "grpquota")
- || !strcmp (this_char, "noquota")
- || !strcmp (this_char, "quota")
- || !strcmp (this_char, "usrquota"))
+ } else if (!strcmp(this_char, "resize")) {
+ if (!value || !*value) {
+ *newLVSize = sb->s_bdev->bd_inode->i_size >>
+ sb->s_blocksize_bits;
+ if (*newLVSize == 0)
+ printk(KERN_ERR
+ "JFS: Cannot determine volume size\n");
+ } else
+ *newLVSize = simple_strtoull(value, &value, 0);
+
+ /* Silently ignore the quota options */
+ } else if (!strcmp(this_char, "grpquota")
+ || !strcmp(this_char, "noquota")
+ || !strcmp(this_char, "quota")
+ || !strcmp(this_char, "usrquota"))
/* Don't do anything ;-) */ ;
else {
- printk ("jfs: Unrecognized mount option %s\n", this_char);
+ printk("jfs: Unrecognized mount option %s\n",
+ this_char);
goto cleanup;
}
}
int jfs_remount(struct super_block *sb, int *flags, char *data)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
+ s64 newLVSize = 0;
+ int rc = 0;
- if (!parse_options(data, sbi)) {
+ if (!parse_options(data, sb, &newLVSize)) {
return -EINVAL;
}
+ if (newLVSize) {
+ if (sb->s_flags & MS_RDONLY) {
+ printk(KERN_ERR
+ "JFS: resize requires volume to be mounted read-write\n");
+ return -EROFS;
+ }
+ rc = jfs_extendfs(sb, newLVSize, 0);
+ if (rc)
+ return rc;
+ }
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
/*
struct jfs_sb_info *sbi;
struct inode *inode;
int rc;
+ s64 newLVSize = 0;
jFYI(1, ("In jfs_read_super: s_flags=0x%lx\n", sb->s_flags));
- sbi = kmalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
+ sbi = kmalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOSPC;
- memset(sbi, 0, sizeof(struct jfs_sb_info));
+ memset(sbi, 0, sizeof (struct jfs_sb_info));
sb->u.generic_sbp = sbi;
- if (!parse_options((char *)data, sbi)) {
+ if (!parse_options((char *) data, sb, &newLVSize)) {
kfree(sbi);
return -EINVAL;
}
+ if (newLVSize) {
+ printk(KERN_ERR "resize option for remount only\n");
+ return -EINVAL;
+ }
+
/*
* Initialize blocksize to 4K.
*/
if (rc) {
if (!silent) {
jERROR(1,
- ("jfs_mount failed w/return code = %d\n",
- rc));
+ ("jfs_mount failed w/return code = %d\n", rc));
}
goto out_mount_failed;
}
* Page cache is indexed by long.
* I would use MAX_LFS_FILESIZE, but it's only half as big
*/
- sb->s_maxbytes = min(((u64)PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
+ sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
#endif
return 0;
extern void txExit(void);
extern void metapage_exit(void);
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
{
struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
INIT_LIST_HEAD(&jfs_ip->mp_list);
init_rwsem(&jfs_ip->rdwrlock);
init_MUTEX(&jfs_ip->commit_sem);
+ jfs_ip->atlhead = 0;
inode_init_once(&jfs_ip->vfs_inode);
}
}
int rc;
jfs_inode_cachep =
- kmem_cache_create("jfs_ip",
- sizeof(struct jfs_inode_info),
- 0, 0, init_once, NULL);
+ kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, 0,
+ init_once, NULL);
if (jfs_inode_cachep == NULL)
return -ENOMEM;
* I/O completion thread (endio)
*/
jfsIOthread = kernel_thread(jfsIOWait, 0,
- CLONE_FS | CLONE_FILES |
- CLONE_SIGHAND);
+ CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
if (jfsIOthread < 0) {
jERROR(1,
- ("init_jfs_fs: fork failed w/rc = %d\n",
- jfsIOthread));
+ ("init_jfs_fs: fork failed w/rc = %d\n", jfsIOthread));
goto end_txmngr;
}
- wait_for_completion(&jfsIOwait); /* Wait until IO thread starts */
+ wait_for_completion(&jfsIOwait); /* Wait until thread starts */
jfsCommitThread = kernel_thread(jfs_lazycommit, 0,
- CLONE_FS | CLONE_FILES |
- CLONE_SIGHAND);
+ CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
if (jfsCommitThread < 0) {
jERROR(1,
("init_jfs_fs: fork failed w/rc = %d\n",
jfsCommitThread));
goto kill_iotask;
}
- wait_for_completion(&jfsIOwait); /* Wait until IO thread starts */
+ wait_for_completion(&jfsIOwait); /* Wait until thread starts */
jfsSyncThread = kernel_thread(jfs_sync, 0,
- CLONE_FS | CLONE_FILES |
- CLONE_SIGHAND);
+ CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
if (jfsSyncThread < 0) {
jERROR(1,
- ("init_jfs_fs: fork failed w/rc = %d\n",
- jfsSyncThread));
+ ("init_jfs_fs: fork failed w/rc = %d\n", jfsSyncThread));
goto kill_committask;
}
- wait_for_completion(&jfsIOwait); /* Wait until IO thread starts */
+ wait_for_completion(&jfsIOwait); /* Wait until thread starts */
#ifdef PROC_FS_JFS
jfs_proc_init();
return register_filesystem(&jfs_fs_type);
-
kill_committask:
jfs_stop_threads = 1;
wake_up(&jfs_commit_thread_wait);
- wait_for_completion(&jfsIOwait); /* Wait until Commit thread exits */
+ wait_for_completion(&jfsIOwait); /* Wait for thread exit */
kill_iotask:
jfs_stop_threads = 1;
wake_up(&jfs_IO_thread_wait);
- wait_for_completion(&jfsIOwait); /* Wait until IO thread exits */
+ wait_for_completion(&jfsIOwait); /* Wait for thread exit */
end_txmngr:
txExit();
free_metapage: