VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 15
-EXTRAVERSION =-pre7
+EXTRAVERSION =-pre8
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
tristate ' Kernel FP software completion' CONFIG_MATHEMU
bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
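+# The three options below gate the CONFIG_DEBUG_* checks added to the
+# lock and semaphore code later in this patch.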
+ bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
+ bool ' Read-write spinlock debugging' CONFIG_DEBUG_RWLOCK
+ bool ' Semaphore debugging' CONFIG_DEBUG_SEMAPHORE
else
define_tristate CONFIG_MATHEMU y
fi
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
EXPORT_SYMBOL(atomic_dec_and_lock);
-#if DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
EXPORT_SYMBOL(spin_unlock);
EXPORT_SYMBOL(debug_spin_lock);
EXPORT_SYMBOL(debug_spin_trylock);
#endif
-#if DEBUG_RWLOCK
+#ifdef CONFIG_DEBUG_RWLOCK
EXPORT_SYMBOL(write_lock);
EXPORT_SYMBOL(read_lock);
#endif
/*
* Finally.
*/
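+/* Under CONFIG_DEBUG_SPINLOCK, record which task took the global IRQ
+   lock and from where, so a wedged lock can be traced to its owner. */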
-#if DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
global_irq_lock.task = current;
global_irq_lock.previous = where;
#endif
{
DECLARE_WAITQUEUE(wait, current);
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down failed(%p)\n",
current->comm, current->pid, sem);
#endif
remove_wait_queue(&sem->wait, &wait);
current->state = TASK_RUNNING;
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down acquired(%p)\n",
current->comm, current->pid, sem);
#endif
DECLARE_WAITQUEUE(wait, current);
long ret;
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down failed(%p)\n",
current->comm, current->pid, sem);
#endif
current->state = TASK_RUNNING;
wake_up(&sem->wait);
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down %s(%p)\n",
current->comm, current->pid,
(ret < 0 ? "interrupted" : "acquired"), sem);
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down(%p) <count=%d> from %p\n",
current->comm, current->pid, sem,
atomic_read(&sem->count), __builtin_return_address(0));
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down(%p) <count=%d> from %p\n",
current->comm, current->pid, sem,
atomic_read(&sem->count), __builtin_return_address(0));
ret = __down_trylock(sem);
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down_trylock %s from %p\n",
current->comm, current->pid,
ret ? "failed" : "acquired",
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-#if DEBUG_SEMAPHORE
+#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): up(%p) <count=%d> from %p\n",
current->comm, current->pid, sem,
atomic_read(&sem->count), __builtin_return_address(0));
}
}
\f
-#if DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
}
return ret;
}
-#endif /* DEBUG_SPINLOCK */
+#endif /* CONFIG_DEBUG_SPINLOCK */
\f
-#if DEBUG_RWLOCK
+#ifdef CONFIG_DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
long regx, regy;
goto try_again;
}
}
-#endif /* DEBUG_RWLOCK */
+#endif /* CONFIG_DEBUG_RWLOCK */
#include <asm/unaligned.h>
#include <asm/sysinfo.h>
#include <asm/hwrpb.h>
+#include <asm/mmu_context.h>
#include "proto.h"
if (alpha_fp_emul(regs.pc-4))
return;
}
- /* fallthrough as illegal instruction .. */
+ break;
+
case 3: /* FEN fault */
+ /* Irritating users can call PAL_clrfen to disable the
+ FPU for the process. The kernel will then trap in
+ do_switch_stack and undo_switch_stack when we try
+ to save and restore the FP registers.
+
+ Given that GCC by default generates code that uses the
+ FP registers, PAL_clrfen is not useful except for DoS
+ attacks. So turn the bleeding FPU back on and be done
+ with it. */
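+	/* pal_flags bit 0 is the FEN (floating-point enable) bit */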
+ current->thread.pal_flags |= 1;
+ __reload_thread(&current->thread);
+ return;
+
case 5: /* illoc */
default: /* unexpected instruction-fault type */
;
if (jiffies - scq->trans_start > HZ) {
printk("%s: Error pushing TBD for %d.%d\n",
card->name, vc->tx_vcc->vpi, vc->tx_vcc->vci);
+#ifdef CONFIG_ATM_IDT77252_DEBUG
idt77252_tx_dump(card);
+#endif
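+ /* restart the one-second timeout so the warning prints at most once per second */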
scq->trans_start = jiffies;
}
ide_add_setting(drive, "multcount", id ? SETTING_RW : SETTING_READ, HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, TYPE_BYTE, 0, id ? id->max_multsect : 0, 1, 2, &drive->mult_count, set_multcount);
ide_add_setting(drive, "nowerr", SETTING_RW, HDIO_GET_NOWERR, HDIO_SET_NOWERR, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr);
ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL);
- ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor], NULL);
+ ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, 4096, PAGE_SIZE, 1024, &max_readahead[major][minor], NULL);
ide_add_setting(drive, "max_kb_per_request", SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL);
ide_add_setting(drive, "lun", SETTING_RW, -1, -1, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL);
ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
- if ((base & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
+ if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
u16 io_base_hi, io_limit_hi;
pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
* we'll end up waiting on them in commit.
*/
ret = fsync_inode_buffers(inode);
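+ /* flush the inode's dirty data buffers as well as its metadata buffers before forcing the commit */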
+ ret |= fsync_inode_data_buffers(inode);
ext3_force_commit(inode->i_sb);
if (i->i_location != i_pos)
continue;
inode = igrab(i->i_fat_inode);
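+ /* igrab() returns NULL if the inode is already being freed;
+    stop only once we have actually pinned one */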
+ if (inode)
+ break;
}
spin_unlock(&fat_inode_lock);
return inode;
spin_lock(&sb_lock);
sb = sb_entry(super_blocks.next);
for (; nr_inodes && sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
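+ /* no dirty inodes on this superblock: skip it without dropping
+    and retaking sb_lock */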
+ if (list_empty(&sb->s_dirty))
+ continue;
spin_unlock(&sb_lock);
nr_inodes = try_to_sync_unused_list(&sb->s_dirty, nr_inodes);
spin_lock(&sb_lock);
*/
inode = NULL;
spin_unlock(&inode_lock);
- if (inode)
- wait_on_inode(inode);
return inode;
}
int minix_sync_file(struct file * file, struct dentry *dentry, int datasync)
{
struct inode *inode = dentry->d_inode;
- int err = fsync_inode_buffers(inode);
+ int err;
+ err = fsync_inode_buffers(inode);
+ err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
if (!NFS_WBACK_BUSY(req))
printk(KERN_ERR "NFS: unlocked request attempted hashed!\n");
if (list_empty(&inode->u.nfs_i.writeback))
- atomic_inc(&inode->i_count);
+ igrab(inode);
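+ /* unlike a bare i_count increment, igrab() refuses an inode
+    that is already being freed */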
inode->u.nfs_i.npages++;
list_add(&req->wb_hash, &inode->u.nfs_i.writeback);
req->wb_count++;
* have a way to deal with that gracefully. Right now I used straightforward
* wrappers, but this needs further analysis wrt potential overflows.
*/
-extern int get_hardware_list(char *);
-extern int get_stram_list(char *);
-#ifdef CONFIG_DEBUG_MALLOC
-extern int get_malloc(char * buffer);
-#endif
#ifdef CONFIG_MODULES
extern int get_module_list(char *);
#endif
release: seq_release,
};
-#ifdef CONFIG_PROC_HARDWARE
-static int hardware_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = get_hardware_list(page);
- return proc_calc_metrics(page, start, off, count, eof, len);
-}
-#endif
-
-#ifdef CONFIG_STRAM_PROC
-static int stram_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = get_stram_list(page);
- return proc_calc_metrics(page, start, off, count, eof, len);
-}
-#endif
-
-#ifdef CONFIG_DEBUG_MALLOC
-static int malloc_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = get_malloc(page);
- return proc_calc_metrics(page, start, off, count, eof, len);
-}
-#endif
-
#ifdef CONFIG_MODULES
static int modules_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{"uptime", uptime_read_proc},
{"meminfo", meminfo_read_proc},
{"version", version_read_proc},
-#ifdef CONFIG_PROC_HARDWARE
- {"hardware", hardware_read_proc},
-#endif
-#ifdef CONFIG_STRAM_PROC
- {"stram", stram_read_proc},
-#endif
-#ifdef CONFIG_DEBUG_MALLOC
- {"malloc", malloc_read_proc},
-#endif
#ifdef CONFIG_MODULES
{"modules", modules_read_proc},
#endif
int sysv_sync_file(struct file * file, struct dentry *dentry, int datasync)
{
struct inode *inode = dentry->d_inode;
- int err = fsync_inode_buffers(inode);
+ int err;
+ err = fsync_inode_buffers(inode);
+ err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
int err;
err = fsync_inode_buffers(inode);
+ err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
#include <linux/wait.h>
#include <linux/rwsem.h>
-#define DEBUG_SEMAPHORE 0
-#define DEBUG_RW_SEMAPHORE 0
-
struct semaphore {
/* Careful, inline assembly knows about the position of these two. */
atomic_t count __attribute__((aligned(8)));
static inline void __down(struct semaphore *sem)
{
long count = atomic_dec_return(&sem->count);
- if (__builtin_expect(count < 0, 0))
+ if (unlikely(count < 0))
__down_failed(sem);
}
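+/* unlikely() is the kernel spelling of __builtin_expect((x), 0);
+   the generated code is unchanged */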
static inline int __down_interruptible(struct semaphore *sem)
{
long count = atomic_dec_return(&sem->count);
- if (__builtin_expect(count < 0, 0))
+ if (unlikely(count < 0))
return __down_failed_interruptible(sem);
return 0;
}
: "m"(*sem), "r"(0x0000000100000000)
: "memory");
- if (__builtin_expect(ret <= 0, 0))
+ if (unlikely(ret <= 0))
__up_wakeup(sem);
}
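+/* WAITQUEUE_DEBUG is always defined (to 0 or 1), while the CONFIG_
+   option is defined only when enabled, hence the defined() test below */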
-#if !WAITQUEUE_DEBUG && !DEBUG_SEMAPHORE
+#if !WAITQUEUE_DEBUG && !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
__down(sem);
#ifndef _ALPHA_SPINLOCK_H
#define _ALPHA_SPINLOCK_H
+#include <linux/config.h>
#include <asm/system.h>
#include <linux/kernel.h>
#include <asm/current.h>
-#define DEBUG_SPINLOCK 0
-#define DEBUG_RWLOCK 0
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
typedef struct {
volatile unsigned int lock /*__attribute__((aligned(32))) */;
-#if DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
int on_cpu;
int line_no;
void *previous;
#endif
} spinlock_t;
-#if DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, -1, 0, 0, 0, 0}
#define spin_lock_init(x) \
((x)->lock = 0, (x)->on_cpu = -1, (x)->previous = 0, (x)->task = 0)
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
-#if DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
extern void spin_unlock(spinlock_t * lock);
extern void debug_spin_lock(spinlock_t * lock, const char *, int);
extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#define spin_lock_own(LOCK, LOCATION) ((void)0)
-#endif /* DEBUG_SPINLOCK */
+#endif /* CONFIG_DEBUG_SPINLOCK */
/***********************************************************/
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#if DEBUG_RWLOCK
+#ifdef CONFIG_DEBUG_RWLOCK
extern void write_lock(rwlock_t * lock);
extern void read_lock(rwlock_t * lock);
#else
: "=m" (*(volatile int *)lock), "=&r" (regx)
: "m" (*(volatile int *)lock) : "memory");
}
-#endif /* DEBUG_RWLOCK */
+#endif /* CONFIG_DEBUG_RWLOCK */
static inline void write_unlock(rwlock_t * lock)
{
#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
#define PCI_IO_LIMIT 0x1d
-#define PCI_IO_RANGE_TYPE_MASK 0x0f /* I/O bridging type */
+#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
#define PCI_IO_RANGE_TYPE_16 0x00
#define PCI_IO_RANGE_TYPE_32 0x01
-#define PCI_IO_RANGE_MASK ~0x0f
+#define PCI_IO_RANGE_MASK (~0x0fUL)
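+/* unsigned long, fully parenthesized masks stay well-defined when
+   combined with 64-bit addresses or used inside larger expressions */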
#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
#define PCI_MEMORY_LIMIT 0x22
-#define PCI_MEMORY_RANGE_TYPE_MASK 0x0f
-#define PCI_MEMORY_RANGE_MASK ~0x0f
+#define PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
+#define PCI_MEMORY_RANGE_MASK (~0x0fUL)
#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */
#define PCI_PREF_MEMORY_LIMIT 0x26
-#define PCI_PREF_RANGE_TYPE_MASK 0x0f
+#define PCI_PREF_RANGE_TYPE_MASK 0x0fUL
#define PCI_PREF_RANGE_TYPE_32 0x00
#define PCI_PREF_RANGE_TYPE_64 0x01
-#define PCI_PREF_RANGE_MASK ~0x0f
+#define PCI_PREF_RANGE_MASK (~0x0fUL)
#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */
#define PCI_PREF_LIMIT_UPPER32 0x2c
#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */
#define PCI_CB_IO_BASE_1_HI 0x36
#define PCI_CB_IO_LIMIT_1 0x38
#define PCI_CB_IO_LIMIT_1_HI 0x3a
-#define PCI_CB_IO_RANGE_MASK ~0x03
+#define PCI_CB_IO_RANGE_MASK (~0x03UL)
/* 0x3c-0x3d are same as for htype 0 */
#define PCI_CB_BRIDGE_CONTROL 0x3e
#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */
#define PCI_BRIDGE_RESOURCES 7
#define PCI_NUM_RESOURCES 11
-#define PCI_REGION_FLAG_MASK 0x0f /* These bits of resource flags tell us the PCI region flags */
+#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus {
struct list_head node; /* node in list of buses */
#define TASK_UNINTERRUPTIBLE 2
#define TASK_ZOMBIE 4
#define TASK_STOPPED 8
-#define TASK_DEAD 16
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
}
goto end_wait4;
case TASK_ZOMBIE:
- /* Make sure no other waiter picks this task up */
- p->state = TASK_DEAD;
-
current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
read_unlock(&tasklist_lock);
*/
if (ic_myaddr == INADDR_NONE ||
#ifdef CONFIG_ROOT_NFS
- (root_server_addr == INADDR_NONE && ic_servaddr == INADDR_NONE) ||
+ (MAJOR(ROOT_DEV) == UNNAMED_MAJOR
+ && root_server_addr == INADDR_NONE
+ && ic_servaddr == INADDR_NONE) ||
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC