Take advantage of the new per-CPU scheme: read ia64_phys_stacked_size_p8 through THIS_CPU() instead of PERCPU_ADDR plus a hard-coded offset, and test the syscall-trace condition via the thread_info flags (TIF_SYSCALL_TRACE) rather than task->ptrace.
#include <asm/asmmacro.h>
#include <asm/offsets.h>
#include <asm/signal.h>
+#include <asm/thread_info.h>
#include "../kernel/minstate.h"
GLOBAL_ENTRY(ia32_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task.
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
-.ret1: adds r2=IA64_TASK_PTRACE_OFFSET,r13
+.ret1:
+#endif
+ adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
- ld8 r2=[r2]
+ ld4 r2=[r2]
;;
mov r8=0
- tbit.nz p6,p0=r2,PT_SYSCALLTRACE_BIT
+ tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
(p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8
END(ia32_ret_from_clone)
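The flag test above is the core of the thread_info conversion: instead of loading current->ptrace (IA64_TASK_PTRACE_OFFSET off r13) and testing PT_SYSCALLTRACE_BIT, the code now loads the flags word of the thread_info that sits IA64_TASK_SIZE bytes past the task struct and tests TIF_SYSCALL_TRACE. A minimal C sketch of the same test, assuming the standard thread_info accessors of this kernel series; the helper name is made up for illustration:

#include <linux/sched.h>

/* Illustrative helper only; the real test lives in the assembly above. */
static int ia32_clone_wants_syscall_trace(void)
{
	/*
	 * thread_info is allocated directly after the task struct on ia64,
	 * so the asm reaches the flags word at r13 + IA64_TASK_SIZE + TI_FLAGS;
	 * in C that is simply the current thread's flag word.
	 */
	return test_thread_flag(TIF_SYSCALL_TRACE);
	/* old equivalent: current->ptrace tested against PT_SYSCALLTRACE_BIT */
}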
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
mov r16=ar.bsp // get existing backing store pointer
- movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
+ movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
(pKern) br.cond.dpnt skip_rbs_switch
br.cond.sptk ia64_leave_kernel
END(handle_syscall_error)
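The hunk above shows the per-CPU half of the change: rather than adding a hard-coded IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET to PERCPU_ADDR, the code takes the address of the value with THIS_CPU(ia64_phys_stacked_size_p8), i.e. the value becomes an ordinary per-CPU variable. A hedged sketch of the C side, assuming the generic per-CPU interfaces of this series; the exact type is a guess (the ld4 above implies a 32-bit object) and the accessor is made up:

#include <linux/percpu.h>

/* Assumed declaration somewhere in the arch/ia64 setup code. */
DEFINE_PER_CPU(int, ia64_phys_stacked_size_p8);

/* Hypothetical accessor, for illustration only. */
static inline int phys_stacked_size_p8(void)
{
	/* Each CPU reads its own copy; the assembly gets the same effect by
	 * materializing the per-CPU address with THIS_CPU() and doing ld4. */
	return __get_cpu_var(ia64_phys_stacked_size_p8);
}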
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted.
 */
br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
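For reference, the contract the ia64_invoke_schedule_tail stub implements: the very first thing a newly forked task runs must be schedule_tail() with the previously executing task (r8 here), so the scheduler can finish the half-done context switch before the child proceeds; the wrapper only exists so that the call does not clobber in0-in7, which still hold the original syscall arguments in case the syscall is restarted. A hypothetical C rendering of that contract (the real code has to stay in assembly precisely to control those registers):

#include <linux/sched.h>

/* Illustrative only: what the child conceptually does on first activation. */
static void child_first_activation(struct task_struct *prev)
{
	schedule_tail(prev);	/* let the scheduler finish the switch we arrived through */
	/* ...then continue on the normal syscall-return path... */
}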
+
#if __GNUC__ < 3
/*
#include <asm/processor.h>
# ifndef CONFIG_NUMA
-EXPORT_SYMBOL(_cpu_data);
+EXPORT_SYMBOL(cpu_info);
# endif
EXPORT_SYMBOL(kernel_thread);
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
- adds r2=IA64_TASK_PTRACE_OFFSET,r13 // r2 = &current->ptrace
;;
cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
movl r16=sys_call_table
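The last hunk is the syscall dispatch arithmetic: native ia64 syscall numbers start at 1024, so the table index is the syscall number minus 1024, and the unsigned compare against 255 folds both range checks into one test before indexing sys_call_table (a number below 1024 wraps to a huge unsigned value and fails the compare). A sketch in C, with the table type and helper name invented for illustration:

/* Hypothetical types; the real sys_call_table is an array of entry points. */
typedef long (*syscall_fn_t)(long, long, long, long, long, long, long, long);
extern syscall_fn_t sys_call_table[];

static syscall_fn_t lookup_syscall(unsigned long nr)
{
	unsigned long idx = nr - 1024;	/* wraps to a huge value if nr < 1024 */

	if (idx <= 255)			/* i.e. 1024 <= nr <= 1024 + 255 */
		return sys_call_table[idx];
	return NULL;			/* out of range */
}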