Several in-tree file systems that nominally support files >2GB set their
s_maxbytes value to ~0ULL. On 32-bit machines this has the nasty side effect
that when a file write reaches the page cache limit (e.g. 2^43) it will
silently wrap and destroy data at the beginning of the file.
This patch changes the file systems in question to fill in a proper limit.
I also have an alternate patch that adds a check for this generically
in super.c, but preliminary comments from Al suggested that he preferred
to do it in the file systems, so it is done this way.
server->namelen = maxlen;
sb->s_maxbytes = fsinfo.maxfilesize;
+	if (sb->s_maxbytes > MAX_LFS_FILESIZE)
+		sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Fire up the writeback cache */
if (nfs_reqlist_alloc(server) < 0) {
/* Inform the kernel about which super operations are available. */
sb->s_op = &ntfs_super_operations;
sb->s_magic = NTFS_SUPER_MAGIC;
- sb->s_maxbytes = ~0ULL >> 1;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
ntfs_debug(DEBUG_OTHER, "Reading special files\n");
if (ntfs_load_special_files(vol)) {
ntfs_error("Error loading special files\n");
iput(inode);
goto error_out;
}
- sb->s_maxbytes = ~0ULL;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
return sb;
error_out:
#define MAX_NON_LFS ((1UL<<31) - 1)
+/* Page cache limit. The filesystems should put that into their s_maxbytes
+ limits, otherwise bad things can happen in VM. */
+#if BITS_PER_LONG==32
+#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#elif BITS_PER_LONG==64
+#define MAX_LFS_FILESIZE 0x7fffffffffffffff
+#endif
+
#define FL_POSIX 1
#define FL_FLOCK 2
#define FL_BROKEN 4 /* broken flock() emulation */
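As a quick sanity check of the 32-bit value: with 4 KB pages the new macro
evaluates to (4096ULL << 31) - 1 = 2^43 - 1, which matches the 2^43 page
cache limit mentioned above. The small userspace program below is not part
of the patch; the 4096-byte PAGE_CACHE_SIZE is an assumption, it just prints
both numbers so they can be compared:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Assumed PAGE_CACHE_SIZE on a typical 32-bit machine. */
		uint64_t page_cache_size = 4096;
		/* Mirrors MAX_LFS_FILESIZE for BITS_PER_LONG == 32. */
		uint64_t max_lfs = (page_cache_size << 31) - 1;

		printf("MAX_LFS_FILESIZE (32-bit) = %llu\n",
		       (unsigned long long)max_lfs);
		printf("2^43 - 1                  = %llu\n",
		       (unsigned long long)((1ULL << 43) - 1));
		return 0;
	}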