- FSR "write" bit moved from bit 8 to bit 11.
- Handle bit 10 of FSR for xscale imprecise aborts.
- Allow Xscale CP0 and CP13 accesses.
- Move Xscale specific implementations to their own file.
# ARMv5
p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wb.o copypage-v4wb.o abort-ev5ej.o
-p-$(CONFIG_CPU_XSCALE) += proc-xscale.o tlb-v4wb.o copypage-v5te.o abort-ev4t.o minicache.o
+p-$(CONFIG_CPU_XSCALE) += proc-xscale.o tlb-v4wb.o copypage-xscale.o abort-xscale.o minicache.o
obj-y += $(sort $(p-y))
* : r3 = saved SPSR
*
* Returns : r0 = address of abort
- * : r1 = FSR, bit 8 = write
+ * : r1 = FSR, bit 11 = write
* : r2-r8 = corrupted
* : r9 = preserved
* : sp = pointer to registers
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r2] @ read aborted ARM instruction
+ bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
- orreq r1, r1, #1 << 8 @ yes.
+ orreq r1, r1, #1 << 11 @ yes.
mov pc, lr
* : r3 = saved SPSR
*
* Returns : r0 = address of abort
- * : r1 = FSR, bit 8 = write
+ * : r1 = FSR, bit 11 = write
* : r2-r8 = corrupted
* : r9 = preserved
* : sp = pointer to registers
tst r3, #PSR_T_BIT
ldrneh r3, [r2] @ read aborted thumb instruction
ldreq r3, [r2] @ read aborted ARM instruction
- bic r1, r1, #1 << 8
+ bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20
tst r3, #1 << 20 @ check write
- orreq r1, r1, #1 << 8
+ orreq r1, r1, #1 << 11
mov pc, lr
+++ /dev/null
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-/*
- * Function: v5ej_early_abort
- *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
- *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 8 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
- *
- * Purpose : obtain information about current aborted instruction.
- * Note: we read user space. This means we might cause a data
- * abort here if the I-TLB and D-TLB aren't seeing the same
- * picture. Unfortunately, this does happen. We live with it.
- */
- .align 5
-ENTRY(v5ej_early_abort)
- mrc p15, 0, r1, c5, c0, 0 @ get FSR
- mrc p15, 0, r0, c6, c0, 0 @ get FAR
- tst r3, #PSR_J_BIT
- orrne r1, r1, #1 << 8 @ always assume write
- bne 1f
- tst r3, #PSR_T_BIT
- ldrneh r3, [r2] @ read aborted thumb instruction
- ldreq r3, [r2] @ read aborted ARM instruction
- movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20
- tst r3, #1 << 20 @ L = 1 -> write
- orreq r1, r1, #1 << 8 @ yes.
-1: mov pc, lr
-
-
--- /dev/null
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+/*
+ * Function: v5tej_early_abort
+ *
+ * Params : r2 = address of aborted instruction
+ * : r3 = saved SPSR
+ *
+ * Returns : r0 = address of abort
+ * : r1 = FSR, bit 11 = write
+ * : r2-r8 = corrupted
+ * : r9 = preserved
+ * : sp = pointer to registers
+ *
+ * Purpose : obtain information about current aborted instruction.
+ * Note: we read user space. This means we might cause a data
+ * abort here if the I-TLB and D-TLB aren't seeing the same
+ * picture. Unfortunately, this does happen. We live with it.
+ */
+ .align 5
+ENTRY(v5tej_early_abort)
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
+ tst r3, #PSR_J_BIT
+ orrne r1, r1, #1 << 11 @ always assume write
+ bne 1f
+ tst r3, #PSR_T_BIT
+ ldrneh r3, [r2] @ read aborted thumb instruction
+ ldreq r3, [r2] @ read aborted ARM instruction
+ movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20
+ tst r3, #1 << 20 @ L = 1 -> write
+ orreq r1, r1, #1 << 11 @ yes.
+1: mov pc, lr
+
+
* : r3 = saved SPSR
*
* Returns : r0 = address of abort
- * : r1 = FSR, bit 8 = writing
+ * : r1 = FSR, bit 11 = write
* : r2-r8 = corrupted
* : r9 = preserved
* : sp = pointer to registers
tst r3, #PSR_T_BIT @ check for thumb mode
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
ldreq r8, [r2] @ read arm instruction
bne .data_thumb_abort
tst r8, #1 << 20 @ L = 1 -> write?
- orreq r1, r1, #1 << 8 @ yes.
+ orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
add pc, pc, r7, lsr #22 @ Now branch to the relevant processing routine
nop
--- /dev/null
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+/*
+ * Function: xscale_abort
+ *
+ * Params : r2 = address of aborted instruction
+ * : r3 = saved SPSR
+ *
+ * Returns : r0 = address of abort
+ * : r1 = FSR, bit 11 = write
+ * : r2-r8 = corrupted
+ * : r9 = preserved
+ * : sp = pointer to registers
+ *
+ * Purpose : obtain information about current aborted instruction.
+ * Note: we read user space. This means we might cause a data
+ * abort here if the I-TLB and D-TLB aren't seeing the same
+ * picture. Unfortunately, this does happen. We live with it.
+ *
+ * Note: XScale contains non-standard architecture extensions.
+ * It requires its own early abort handler.
+ */
+ .align 5
+ENTRY(xscale_abort)
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ tst r3, #PSR_T_BIT
+ ldrneh r3, [r2] @ read aborted thumb instruction
+ ldreq r3, [r2] @ read aborted ARM instruction
+ bic r1, r1, #1 << 11 @ clear bit 11 of FSR (bit 10 is kept: it flags xscale imprecise aborts)
+ movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20
+ tst r3, #1 << 20 @ check write
+ orreq r1, r1, #1 << 11
+ mov pc, lr
+++ /dev/null
-/*
- * linux/arch/arm/lib/copypage-armv5te.S
- *
- * Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/constants.h>
-
-/*
- * General note:
- * We don't really want write-allocate cache behaviour for these functions
- * since that will just eat through 8K of the cache.
- */
-
- .text
- .align 5
-/*
- * ARMv5TE optimised copy_user_page
- * r0 = destination
- * r1 = source
- * r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- * What we could do is use the mini-cache to buffer reads from the source
- * page. We rely on the mini-cache being smaller than one page, so we'll
- * cycle through the complete cache anyway.
- */
-ENTRY(v5te_mc_copy_user_page)
- stmfd sp!, {r4, r5, lr}
- mov r5, r0
- mov r0, r1
- bl map_page_minicache
- mov r1, r5
- mov lr, #PAGE_SZ/32
-
-1: mov ip, r1
- ldrd r2, [r0], #8
- ldrd r4, [r0], #8
- strd r2, [r1], #8
- ldrd r2, [r0], #8
- strd r4, [r1], #8
- ldrd r4, [r0], #8
- strd r2, [r1], #8
- strd r4, [r1], #8
- mcr p15, 0, ip, c7, c10, 1 @ clean D line
- mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
- subs lr, lr, #1
- bne 1b
-
- ldmfd sp!, {r4, r5, pc}
-
- .align 5
-/*
- * ARMv5TE optimised clear_user_page
- * r0 = destination
- * r1 = virtual user address of ultimate destination page
- */
-ENTRY(v5te_mc_clear_user_page)
- str lr, [sp, #-4]!
- mov r1, #PAGE_SZ/32
- mov r2, #0
- mov r3, #0
-1: mov ip, r0
- strd r2, [r0], #8
- strd r2, [r0], #8
- strd r2, [r0], #8
- strd r2, [r0], #8
- mcr p15, 0, ip, c7, c10, 1 @ clean D line
- mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
- subs r1, r1, #1
- bne 1b
- ldr pc, [sp], #4
-
- .section ".text.init", #alloc, #execinstr
-
-ENTRY(v5te_mc_user_fns)
- .long v5te_mc_clear_user_page
- .long v5te_mc_copy_user_page
--- /dev/null
+/*
+ * linux/arch/arm/lib/copypage-xscale.S
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/constants.h>
+
+/*
+ * General note:
+ * We don't really want write-allocate cache behaviour for these functions
+ * since that will just eat through 8K of the cache.
+ */
+
+ .text
+ .align 5
+/*
+ * XScale optimised copy_user_page
+ * r0 = destination
+ * r1 = source
+ * r2 = virtual user address of ultimate destination page
+ *
+ * The source page may have some clean entries in the cache already, but we
+ * can safely ignore them - break_cow() will flush them out of the cache
+ * if we eventually end up using our copied page.
+ *
+ * What we could do is use the mini-cache to buffer reads from the source
+ * page. We rely on the mini-cache being smaller than one page, so we'll
+ * cycle through the complete cache anyway.
+ */
+ENTRY(xscale_mc_copy_user_page)
+ stmfd sp!, {r4, r5, lr}
+ mov r5, r0
+ mov r0, r1
+ bl map_page_minicache
+ mov r1, r5
+ mov lr, #PAGE_SZ/32
+
+1: mov ip, r1
+ ldrd r2, [r0], #8
+ ldrd r4, [r0], #8
+ strd r2, [r1], #8
+ ldrd r2, [r0], #8
+ strd r4, [r1], #8
+ ldrd r4, [r0], #8
+ strd r2, [r1], #8
+ strd r4, [r1], #8
+ mcr p15, 0, ip, c7, c10, 1 @ clean D line
+ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
+ subs lr, lr, #1
+ bne 1b
+
+ ldmfd sp!, {r4, r5, pc}
+
+ .align 5
+/*
+ * XScale optimised clear_user_page
+ * r0 = destination
+ * r1 = virtual user address of ultimate destination page
+ */
+ENTRY(xscale_mc_clear_user_page)
+ str lr, [sp, #-4]!
+ mov r1, #PAGE_SZ/32
+ mov r2, #0
+ mov r3, #0
+1: mov ip, r0
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ mcr p15, 0, ip, c7, c10, 1 @ clean D line
+ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
+ subs r1, r1, #1
+ bne 1b
+ ldr pc, [sp], #4
+
+ .section ".text.init", #alloc, #execinstr
+
+ENTRY(xscale_mc_user_fns)
+ .long xscale_mc_clear_user_page
+ .long xscale_mc_copy_user_page
int sig;
const char *name;
} fsr_info[] = {
+ /*
+ * The following are the standard ARMv3 and ARMv4 aborts. ARMv5
+ * defines these to be "precise" aborts.
+ */
{ do_bad, SIGSEGV, "vector exception" },
{ do_bad, SIGILL, "alignment exception" },
{ do_bad, SIGKILL, "terminal exception" },
{ do_bad, SIGBUS, "external abort on translation" },
{ do_sect_fault, SIGSEGV, "section permission fault" },
{ do_bad, SIGBUS, "external abort on translation" },
- { do_page_fault, SIGSEGV, "page permission fault" }
+ { do_page_fault, SIGSEGV, "page permission fault" },
+ /*
+ * The following are "imprecise" aborts, which are signalled by bit
+ * 10 of the FSR, and may not be recoverable. These are only
+ * supported if the CPU abort handler supports bit 10.
+ */
+ { do_bad, SIGBUS, "unknown 16" },
+ { do_bad, SIGBUS, "unknown 17" },
+ { do_bad, SIGBUS, "unknown 18" },
+ { do_bad, SIGBUS, "unknown 19" },
+ { do_bad, SIGBUS, "lock abort" }, /* xscale */
+ { do_bad, SIGBUS, "unknown 21" },
+ { do_bad, SIGBUS, "imprecise external abort" }, /* xscale */
+ { do_bad, SIGBUS, "unknown 23" },
+ { do_bad, SIGBUS, "dcache parity error" }, /* xscale */
+ { do_bad, SIGBUS, "unknown 25" },
+ { do_bad, SIGBUS, "unknown 26" },
+ { do_bad, SIGBUS, "unknown 27" },
+ { do_bad, SIGBUS, "unknown 28" },
+ { do_bad, SIGBUS, "unknown 29" },
+ { do_bad, SIGBUS, "unknown 30" },
+ { do_bad, SIGBUS, "unknown 31" }
};
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, const char *name)
{
- if (nr >= 0 && nr < 16) {
+ if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].name = name;
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- const struct fsr_info *inf = fsr_info + (fsr & 15);
+ const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
if (!inf->fn(addr, fsr, regs))
return;
*/
.type arm926_processor_functions, #object
arm926_processor_functions:
- .word v5ej_early_abort
+ .word v5tej_early_abort
.word cpu_arm926_check_bugs
.word cpu_arm926_proc_init
.word cpu_arm926_proc_fin
*/
.align 5
ENTRY(cpu_xscale_set_pte)
- tst r0, #2048
- streq r0, [r0, -r0] @ BUG_ON
str r1, [r0], #-2048 @ linux version
bic r2, r1, #0xff0
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0, 0 @ load domain access register
+ mov r0, #1 @ Allow access to CP0 and CP13
+ orr r0, r0, #1 << 13 @ It's undefined whether this
+ mcr p15, 0, r0, c15, c1, 0 @ affects USR or SVC modes
mrc p15, 0, r0, c1, c0, 0 @ get control register
bic r0, r0, #0x0200 @ .... ..R. .... ....
bic r0, r0, #0x0082 @ .... .... B... ..A.
.type xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
- .word v4t_early_abort
+ .word xscale_abort
.word cpu_xscale_check_bugs
.word cpu_xscale_proc_init
.word cpu_xscale_proc_fin
.type cpu_arch_name, #object
cpu_arch_name:
- .asciz "armv5"
+ .asciz "armv5te"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
.long cpu_80200_info
.long xscale_processor_functions
.long v4wbi_tlb_fns
- .long v5te_mc_user_fns
+ .long xscale_mc_user_fns
.size __80200_proc_info, . - __80200_proc_info
.type __pxa250_proc_info,#object
.long cpu_pxa250_info
.long xscale_processor_functions
.long v4wbi_tlb_fns
- .long v5te_mc_user_fns
+ .long xscale_mc_user_fns
.size __pxa250_proc_info, . - __pxa250_proc_info