- Fix ARM Makefile; we now build with -Os instead of -O2.
- Fix backtrace code to display more registers correctly.
- Fix various build errors.
- Fix PSR for architecture compliance.
- Update mach-types file.
LDFLAGS_vmlinux :=-p -X -T arch/arm/vmlinux.lds
OBJCOPYFLAGS :=-O binary -R .note -R .comment -S
GZFLAGS :=-9
-CFLAGS +=-pipe
+#CFLAGS +=-pipe
+
+CFLAGS :=$(CFLAGS:-O2=-Os)
ifneq ($(CONFIG_NO_FRAME_POINTER),y)
-CFLAGS :=$(CFLAGS:-fomit-frame-pointer=)
+CFLAGS :=$(CFLAGS: -fomit-frame-pointer=)
endif
ifeq ($(CONFIG_DEBUG_INFO),y)
# the options further down the list override previous items.
#
apcs-$(CONFIG_CPU_32) :=-mapcs-32
-apcs-$(CONFIG_CPU_26) :=-mapcs-26 -mcpu=arm3 -Os
+apcs-$(CONFIG_CPU_26) :=-mapcs-26 -mcpu=arm3
# This selects which instruction set is used.
# Note that GCC is lame - it doesn't numerically define an
@ln -sf proc-$(PROCESSOR) include/asm-arm/proc
@touch $@
-prepare: include/asm-arm/.arch include/asm-arm/.proc \
- include/asm-arm/constants.h
+prepare: maketools
+
+.PHONY: maketools
+maketools: include/asm-arm/.arch include/asm-arm/.proc \
+ include/asm-arm/constants.h include/linux/version.h FORCE
@$(MAKETOOLS)
vmlinux: arch/arm/vmlinux.lds
$(wildcard include/config/cpu/32.h) \
$(wildcard include/config/cpu/26.h) \
$(wildcard include/config/arch/*.h)
+ @echo ' Generating $@'
@sed 's/TEXTADDR/$(TEXTADDR)/;s/DATAADDR/$(DATAADDR)/' $(LDSCRIPT) >$@
bzImage zImage zinstall Image bootpImage install: vmlinux
archclean: FORCE
@$(MAKEBOOT) clean
-# we need version.h
-maketools: include/linux/version.h FORCE
- @$(MAKETOOLS)
-
# My testing targets (that short circuit a few dependencies)
zImg:; @$(MAKEBOOT) zImage
Img:; @$(MAKEBOOT) Image
/*
* linux/arch/arm/kernel/traps.c
*
- * Copyright (C) 1995, 1996 Russell King
+ * Copyright (C) 1995-2002 Russell King
* Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p = bottom & ~31;
+ mm_segment_t fs;
int i;
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
printk("%s", str);
printk("(0x%08lx to 0x%08lx)\n", bottom, top);
}
printk ("\n");
}
+
+ set_fs(fs);
}
static void dump_instr(struct pt_regs *regs)
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
+ mm_segment_t fs;
int i;
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
printk("Code: ");
for (i = -4; i < 1; i++) {
unsigned int val, bad;
}
}
printk("\n");
+
+ set_fs(fs);
}
static void dump_stack(struct task_struct *tsk, unsigned long sp)
current->comm, current->pid, tsk->thread_info + 1);
if (!user_mode(regs) || in_interrupt()) {
- mm_segment_t fs;
-
- /*
- * We need to switch to kernel mode so that we can
- * use __get_user to safely read from kernel space.
- * Note that we now dump the code first, just in case
- * the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
dump_stack(tsk, (unsigned long)(regs + 1));
dump_backtrace(regs, tsk);
dump_instr(regs);
-
- set_fs(fs);
}
spin_unlock_irq(&die_lock);
}
#ifdef CONFIG_CPU_26
-asmlinkage void do_excpt(int address, struct pt_regs *regs, int mode)
+asmlinkage void do_excpt(unsigned long address, struct pt_regs *regs, int mode)
{
siginfo_t info;
asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
{
unsigned int vectors = vectors_base();
- mm_segment_t fs;
console_verbose();
printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
handler[reason], processor_modes[proc_mode]);
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
/*
* Dump out the vectors and stub routines. Maybe a better solution
* would be to dump them out only if we detect that they are corrupted.
dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40);
dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8);
- set_fs(fs);
-
die("Oops", regs, 0);
- cli();
+ local_irq_disable();
panic("bad mode");
}
struct thread_info *thread = current_thread_info();
siginfo_t info;
- /* You might think just testing `handler' would be enough, but PER_LINUX
- * points it to no_lcall7 to catch undercover SVr4 binaries. Gutted.
- */
if (current->personality != PER_LINUX && thread->exec_domain->handler) {
- /* Hand it off to iBCS. The extra parameter and consequent type
- * forcing is necessary because of the weird ARM calling convention.
- */
thread->exec_domain->handler(n, regs);
return regs->ARM_r0;
}
case NR(usr26):
if (!(elf_hwcap & HWCAP_26BIT))
break;
- regs->ARM_cpsr &= ~0x10;
+ regs->ARM_cpsr &= ~MODE32_BIT;
return regs->ARM_r0;
case NR(usr32):
if (!(elf_hwcap & HWCAP_26BIT))
break;
- regs->ARM_cpsr |= 0x10;
+ regs->ARM_cpsr |= MODE32_BIT;
return regs->ARM_r0;
#else
case NR(cacheflush):
3: tst frame, mask @ Check for address exceptions...
bne 1b
-1001: ldmda frame, {r0, r1, r2, r3} @ fp, sp, lr, pc
- mov next, r0
-
+1001: ldr next, [frame, #-12] @ get fp
+1002: ldr r2, [frame, #-4] @ get lr
+1003: ldr r3, [frame, #0] @ get pc
sub save, r3, offset @ Correct PC for prefetching
bic save, save, mask
+1004: ldr r1, [save, #0] @ get instruction at function
+ mov r1, r1, lsr #10
+ ldr r3, .Ldsi+4
+ teq r1, r3
+ subeq save, save, #4
adr r0, .Lfe
mov r1, save
bic r2, r2, mask
bl printk @ print pc and link register
- sub r0, frame, #16
-1002: ldr r1, [save, #4] @ get instruction at function+4
+ ldr r0, [frame, #-8] @ get sp
+ sub r0, r0, #4
+1005: ldr r1, [save, #4] @ get instruction at function+4
mov r3, r1, lsr #10
ldr r2, .Ldsi+4
teq r3, r2 @ Check for stmia sp!, {args}
addeq save, save, #4 @ next instruction
bleq .Ldumpstm
-1003: ldr r1, [save, #4] @ Get 'stmia sp!, {rlist, fp, ip, lr, pc}' instruction
+ sub r0, frame, #16
+1006: ldr r1, [save, #4] @ Get 'stmia sp!, {rlist, fp, ip, lr, pc}' instruction
mov r3, r1, lsr #10
ldr r2, .Ldsi
teq r3, r2
*/
.section .fixup,"ax"
.align 0
-1004: ldr r0, =.Lbad
+1007: ldr r0, =.Lbad
mov r1, frame
bl printk
LOADREGS(fd, sp!, {r4 - r8, pc})
.section __ex_table,"a"
.align 3
- .long 1001b, 1004b
- .long 1002b, 1004b
- .long 1003b, 1004b
+ .long 1001b, 1007b
+ .long 1002b, 1007b
+ .long 1003b, 1007b
+ .long 1004b, 1007b
+ .long 1005b, 1007b
+ .long 1006b, 1007b
.previous
#define instr r4
return 0;
}
-subsys_initcall(&personal_pci_init);
+subsys_initcall(personal_pci_init);
register_kmi(&integrator_keyboard);
register_kmi(&integrator_mouse);
#endif
+ return 0;
}
__initcall(integrator_init);
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
-# Last update: Tue May 21 14:19:05 2002
+# Last update: Fri Jul 5 21:32:20 2002
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
brh ARCH_BRH BRH 181
s3c2410 ARCH_S3C2410 S3C2410 182
possio_px30 ARCH_POSSIO_PX30 POSSIO_PX30 183
+s3c2800 ARCH_S3C2800 S3C2800 184
+fleetwood SA1100_FLEETWOOD FLEETWOOD 185
+omaha ARCH_OMAHA OMAHA 186
+ta7 ARCH_TA7 TA7 187
+nova SA1100_NOVA NOVA 188
+hmk ARCH_HMK HMK 189
+inphinity ARCH_INPHINITY INPHINITY 190
+fester SA1100_FESTER FESTER 191
+gpi ARCH_GPI GPI 192
+smdk2410 ARCH_SMDK2410 SMDK2410 193
+premium ARCH_PREMIUM PREMIUM 194
+nexio SA1100_NEXIO NEXIO 195
+bitbox SA1100_BITBOX BITBOX 196
+g200 SA1100_G200 G200 197
+gill SA1100_GILL GILL 198
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
+#include <linux/sched.h>
+#include <linux/mm.h>
#include <asm/proc/cache.h>
#endif
#define IRQT_BOTHEDGE (__IRQT_RISEDGE|__IRQT_FALEDGE)
#define IRQT_LOW (__IRQT_LOWLVL)
#define IRQT_HIGH (__IRQT_HIGHLVL)
+#define IRQT_PROBE (1 << 4)
int set_irq_type(unsigned int irq, unsigned int type);
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H
-#include <asm/bitops.h>
-#include <asm/pgtable.h>
-#include <asm/arch/memory.h>
#include <asm/proc-fns.h>
-#define destroy_context(mm) do { } while(0)
#define init_new_context(tsk,mm) 0
+#define destroy_context(mm) do { } while(0)
/*
* This is called when "tsk" is about to enter lazy TLB mode.
*
* tsk->mm will be NULL
*/
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk, unsigned int cpu)
{
- if (prev != next)
- cpu_switch_mm(next->pgd, next);
+ cpu_switch_mm(next->pgd, next);
}
-#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL,smp_processor_id())
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ cpu_switch_mm(next->pgd, next);
+}
#endif
* The following macros handle the cache and bufferable bits...
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
-#define _L_PTE_READ L_PTE_USER | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
+#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
-#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE)
+#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC)
#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
/*
* Force CPSR to something logical...
*/
- regs->ARM_cpsr &= (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT|MODE32_BIT);
+ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
return 0;
}
break;
#else
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
- : "=r" (ret)
+ : "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory");
break;
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
- : "=r" (ret)
+ : "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory");
break;
+/* Placeholder for ARM suspend/resume support. */
+#ifndef _ASMARM_SUSPEND_H
+#define _ASMARM_SUSPEND_H
+
+#endif