IPL device.
CONFIG_IPL_TAPE
- Select this option if you want to IPL the image from a Tape.
+ Select "tape" if you want to IPL the image from a Tape.
+
+ Select "vm_reader" if you are running under VM/ESA and want
+ to IPL the image from the emulated card reader.
CONFIG_FAST_IRQ
  Select this option in order to get the interrupts processed faster
  on your S/390 or zSeries machine. If selected, after an interrupt
  is processed, the channel subsystem will be asked for other pending
  interrupts which will also be processed before leaving the interrupt
  context. This speeds up the I/O a lot. Say "Y".
+CONFIG_MACHCHK_WARNING
+ Select this option if you want the machine check handler on IBM S/390 or
+ zSeries to process warning machine checks (e.g. on power failures).
+ If unsure, say "Y".
+
+CONFIG_CHSC
+ Select this option if you want the s390 common I/O layer to use information
+ obtained by channel subsystem calls. This will enable Linux to process link
+ failures and resource accessibility events. Moreover, if you have procfs
+ enabled, you'll be able to toggle chpids logically offline and online. Even
+ if you don't understand what this means, you should say "Y".
+
CONFIG_PROCESS_DEBUG
Say Y to print all process fault locations to the console. This is
a debugging option; you probably do not want to set it unless you
are an S390 port maintainer.
+CONFIG_PFAULT
+  Select this option if you want to use PFAULT pseudo page fault
+ handling under VM. If running native or in LPAR, this option
+ has no effect. If your VM does not support PFAULT, PAGEEX
+ pseudo page fault handling will be used.
+ Note that VM 4.2 supports PFAULT but has a bug in its
+ implementation that causes some problems.
+ Everybody who wants to run Linux under VM != VM4.2 should select
+ this option.
+
+CONFIG_SHARED_KERNEL
+  Select this option if you want to share the text segment of the
+ Linux kernel between different VM guests. This reduces memory
+ usage with lots of guests but greatly increases kernel size.
+ You should only select this option if you know what you are
+ doing and want to exploit this feature.
+
+CONFIG_QDIO
+ This driver provides the Queued Direct I/O base support for the
+ IBM S/390 (G5 and G6) and eServer zSeries (z800 and z900).
+
+ For details please refer to the documentation provided by IBM at
+ <http://www10.software.ibm.com/developerworks/opensource/linux390>
+
+ This driver is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called qdio.o. If you want to compile it as a
+ module, say M here and read <file:Documentation/modules.txt>.
+
+ If unsure, say Y.
+
+Performance statistics for QDIO base support
+CONFIG_QDIO_PERF_STATS
+  Say Y here to get performance statistics in /proc/qdio_perf.
+
+ If unsure, say N.
+
-
+#
# s390/Makefile
#
# This file is included by the global makefile so that you can add your own
#
LD=$(CROSS_COMPILE)ld -m elf_s390
-CPP=$(CC) -E
OBJCOPY=$(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S
LDFLAGS=-e start
ifeq ($(CONFIG_SHARED_KERNEL),y)
-LINKFLAGS =-T $(TOPDIR)/arch/s390/vmlinux-shared.lds $(LDFLAGS)
+ LINKSCRIPT := arch/s390/vmlinux-shared.lds
else
-LINKFLAGS =-T $(TOPDIR)/arch/s390/vmlinux.lds $(LDFLAGS)
+ LINKSCRIPT := arch/s390/vmlinux.lds
endif
+LINKFLAGS =-T $(TOPDIR)/$(LINKSCRIPT) $(LDFLAGS)
CFLAGS_PIPE := -pipe
CFLAGS_NSR := -fno-strength-reduce
HEAD := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
-SUBDIRS := $(SUBDIRS) arch/s390/mm arch/s390/kernel arch/s390/lib \
- drivers/s390 arch/s390/math-emu
+SUBDIRS += arch/s390/mm arch/s390/kernel arch/s390/lib drivers/s390
CORE_FILES := arch/s390/mm/mm.o arch/s390/kernel/kernel.o $(CORE_FILES)
-DRIVERS := $(DRIVERS) drivers/s390/built-in.o
LIBS := $(TOPDIR)/arch/s390/lib/lib.a $(LIBS) $(TOPDIR)/arch/s390/lib/lib.a
+DRIVERS += drivers/s390/built-in.o
+
ifeq ($(CONFIG_MATHEMU),y)
- CORE_FILES := $(CORE_FILES) arch/s390/math-emu/math-emu.o
+ SUBDIRS += arch/s390/math-emu
+ DRIVERS += arch/s390/math-emu/math-emu.o
endif
all: image listing
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+vmlinux: $(LINKSCRIPT)
+
image: vmlinux
@$(MAKEBOOT) image
archclean:
@$(MAKEBOOT) clean
+ $(MAKE) -C arch/$(ARCH)/kernel clean
archmrproper:
archdep:
@$(MAKEBOOT) dep
+
+install: vmlinux
+ @$(MAKEBOOT) BOOTIMAGE=image install
OBJCOPY = $(CROSS_COMPILE)objcopy
-O_TARGET :=
-
EXTRA_AFLAGS := -traditional
include $(TOPDIR)/Rules.make
clean:
rm -f image listing iplfba.boot ipleckd.boot ipldump.boot
+install: $(CONFIGURE) $(BOOTIMAGE)
+ sh -x ./install.sh $(KERNELRELEASE) $(BOOTIMAGE) $(TOPDIR)/System.map $(TOPDIR)/Kerntypes "$(INSTALL_PATH)"
+
--- /dev/null
+#!/bin/sh
+#
+# arch/s390/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for s390 architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - kernel type file (Kerntypes)
+# $5 - default install path (blank if root directory)
+#
+
+# User may have a custom install script
+
+if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
+if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+
+# Default install - same as make zlilo
+
+if [ -f $5/vmlinuz ]; then
+	mv $5/vmlinuz $5/vmlinuz.old
+fi
+
+if [ -f $5/System.map ]; then
+	mv $5/System.map $5/System.old
+fi
+
+cat $2 > $5/vmlinuz
+cp $3 $5/System.map
+# install the Kerntypes file passed as $4 by the Makefile
+cp $4 $5/Kerntypes
define_bool CONFIG_EISA n
define_bool CONFIG_MCA n
define_bool CONFIG_UID16 y
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
+define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
define_bool CONFIG_GENERIC_BUST_SPINLOCK n
mainmenu_name "Linux Kernel Configuration"
endmenu
mainmenu_option next_comment
-comment 'General setup'
+comment 'Base setup'
bool 'Fast IRQ handling' CONFIG_FAST_IRQ
+bool 'Process warning machine checks' CONFIG_MACHCHK_WARNING
+bool 'Use chscs for Common I/O' CONFIG_CHSC
+
+tristate 'QDIO support' CONFIG_QDIO
+ if [ "$CONFIG_QDIO" != "n" ]; then
+ bool ' Performance statistics in /proc' CONFIG_QDIO_PERF_STATS
+ fi
+
bool 'Builtin IPL record support' CONFIG_IPL
if [ "$CONFIG_IPL" = "y" ]; then
choice 'IPL method generated into head.S' \
bool 'VM shared kernel support' CONFIG_SHARED_KERNEL
endmenu
+mainmenu_option next_comment
+comment 'SCSI support'
+
+tristate 'SCSI support' CONFIG_SCSI
+
+if [ "$CONFIG_SCSI" != "n" ]; then
+ source drivers/scsi/Config.in
+fi
+endmenu
+
source drivers/s390/Config.in
if [ "$CONFIG_NET" = "y" ]; then
comment 'Kernel hacking'
#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
-if [ "$CONFIG_CTC" = "y" ]; then
- bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG
-fi
+#if [ "$CONFIG_CTC" = "y" ]; then
+# bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG
+#fi
bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
endmenu
# CONFIG_EISA is not set
# CONFIG_MCA is not set
CONFIG_UID16=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
-CONFIG_GENERIC_BUST_SPINLOCK=n
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_GENERIC_BUST_SPINLOCK is not set
CONFIG_ARCH_S390=y
#
#
CONFIG_EXPERIMENTAL=y
+#
+# General setup
+#
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+
#
# Loadable module support
#
CONFIG_MATHEMU=y
#
-# General setup
+# Base setup
#
CONFIG_FAST_IRQ=y
+CONFIG_MACHCHK_WARNING=y
+CONFIG_CHSC=y
+CONFIG_QDIO=m
+# CONFIG_QDIO_PERF_STATS is not set
CONFIG_IPL=y
# CONFIG_IPL_TAPE is not set
CONFIG_IPL_VM=y
-CONFIG_NET=y
-CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
CONFIG_KCORE_ELF=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
+#
+# SCSI support
+#
+CONFIG_SCSI=m
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_SR_EXTRA_DEVS=10
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_REPORT_LUNS is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_SCSI_PCMCIA is not set
+
#
# Block device drivers
#
#
# S/390 tape hardware support
#
-CONFIG_S390_TAPE_3490=y
-CONFIG_S390_TAPE_3480=y
+CONFIG_S390_TAPE_3490=m
+CONFIG_S390_TAPE_3480=m
#
# Network device drivers
#
CONFIG_CHANDEV=y
CONFIG_HOTPLUG=y
+CONFIG_LCS=m
CONFIG_CTC=m
CONFIG_IUCV=m
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
-CONFIG_NETLINK=y
-# CONFIG_RTNETLINK is not set
# CONFIG_NETLINK_DEV is not set
# CONFIG_NETFILTER is not set
# CONFIG_FILTER is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
CONFIG_IPV6=m
-# CONFIG_IPV6_NETLINK is not set
# CONFIG_KHTTPD is not set
# CONFIG_ATM is not set
+# CONFIG_VLAN_8021Q is not set
#
#
#
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
+
+#
+# Appletalk devices
+#
+# CONFIG_DEV_APPLETALK is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_FASTROUTE is not set
+CONFIG_NET_FASTROUTE=y
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# File systems
#
# CONFIG_QUOTA is not set
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_BFS_FS is not set
-# CONFIG_CMS_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
+CONFIG_EXT3_FS=y
+CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
# CONFIG_FAT_FS is not set
# CONFIG_MSDOS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_TMPFS is not set
-# CONFIG_RAMFS is not set
+CONFIG_RAMFS=y
# CONFIG_ISO9660_FS is not set
# CONFIG_JOLIET is not set
# CONFIG_ZISOFS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
# CONFIG_MINIX_FS is not set
-# CONFIG_FREEVXFS_FS is not set
+# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS_DEBUG is not set
# CONFIG_HPFS_FS is not set
# CONFIG_ROOT_NFS is not set
CONFIG_NFSD=y
# CONFIG_NFSD_V3 is not set
+# CONFIG_NFSD_TCP is not set
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
+CONFIG_EXPORTFS=y
# CONFIG_SMB_FS is not set
# CONFIG_NCP_FS is not set
# CONFIG_NCPFS_PACKET_SIGNING is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
# CONFIG_ZISOFS_FS is not set
-# CONFIG_ZLIB_FS_INFLATE is not set
#
# Partition Types
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
# Kernel hacking
#
CONFIG_MAGIC_SYSRQ=y
+
+#
+# Library routines
+#
+# CONFIG_CRC32 is not set
+# CONFIG_ZLIB_INFLATE is not set
+# CONFIG_ZLIB_DEFLATE is not set
# Makefile for the linux kernel.
#
-O_TARGET := kernel.o
EXTRA_TARGETS := head.o init_task.o
+EXTRA_AFLAGS := -traditional
+
+O_TARGET := kernel.o
export-objs := debug.o ebcdic.o irq.o s390_ext.o smp.o s390_ksyms.o
obj-y := entry.o bitmap.o traps.o time.o process.o irq.o \
#
obj-$(CONFIG_REMOTE_DEBUG) += gdb-stub.o #gdb-low.o
-EXTRA_AFLAGS := -traditional
-
include $(TOPDIR)/Rules.make
+.PHONY: asm-offsets.h
+
+entry.S: asm-offsets.h
+
+asm-offsets.h: asm-offsets.c
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -S $< -o - | grep -- "->" | \
+ (echo "#ifndef __ASM_OFFSETS_H__"; \
+ echo "#define __ASM_OFFSETS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY"; \
+ echo " *"; \
+ echo " * This file was generated by arch/s390/kernel/Makefile"; \
+ echo " */"; \
+ sed -e "s:^->\([^ ]*\) \([^ ]*\) \(.*\):#define \\1 \\2 /* \\3*/:" \
+ -e "s:->::"; \
+ echo "#endif" \
+ ) > asm-offsets.h
+
+clean:
+ rm -f asm-offsets.h
+
+
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+
+/* Use marker if you need to separate the values later */
+
+#define DEFINE(sym, val, marker) \
+ asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),);
+ DEFINE(__THREAD_ar2, offsetof(struct task_struct, thread.ar2),);
+ DEFINE(__THREAD_ar4, offsetof(struct task_struct, thread.ar4),);
+ DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
+ DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
+ BLANK();
+ DEFINE(__TI_task, offsetof(struct thread_info, task),);
+ DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
+ DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
+ DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
+ DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
+ return 0;
+}
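
To make the generation step above concrete: each DEFINE() emits a marker
line such as "->__TI_flags 8 offsetof(struct thread_info, flags)" into the
compiler's assembly output, and the sed script in the Makefile rule turns
every marker into a preprocessor define. With illustrative offsets (the
real values depend on the actual structure layouts), the generated
asm-offsets.h would look roughly like this:

	#ifndef __ASM_OFFSETS_H__
	#define __ASM_OFFSETS_H__
	/*
	 * DO NOT MODIFY
	 *
	 * This file was generated by arch/s390/kernel/Makefile
	 */
	#define __TI_task 0 /* offsetof(struct thread_info, task) */
	#define __TI_flags 8 /* offsetof(struct thread_info, flags) */
	#endif

entry.S can then test thread_info fields symbolically, as in
"tm __TI_flags+3(%r9),_TIF_SIGPENDING", instead of hard-coding offsets.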
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
+ .globl _sb_findmap
+_sb_findmap:
+ .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+
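The new _sb_findmap table is the set-bit counterpart of the existing
_zb_findmap: indexed by a byte value, it yields the number of the first
(least-significant) one bit, or 8 for a zero byte, so bit-search loops can
add the result to a word offset. A minimal C sketch of the mapping the
table encodes (illustrative only; the kernel uses the table itself):

	/* Equivalent of one _sb_findmap lookup: index of the first
	 * one bit in a byte, 8 if the byte is zero. */
	static unsigned char sb_find(unsigned char byte)
	{
		unsigned char bit;

		if (byte == 0)
			return 8;
		for (bit = 0; (byte & 1) == 0; bit++)
			byte >>= 1;
		return bit;
	}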
size_t user_len, loff_t * offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
-static struct proc_dir_entry
-*debug_create_proc_dir_entry(struct proc_dir_entry *root,
- const char *name, mode_t mode,
- struct file_operations *fops);
-static void debug_delete_proc_dir_entry(struct proc_dir_entry *root,
- struct proc_dir_entry *entry);
static debug_info_t* debug_info_create(char *name, int page_order, int nr_areas, int buf_size);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
static int initialized = 0;
static struct file_operations debug_file_ops = {
- owner: THIS_MODULE,
read: debug_output,
write: debug_input,
open: debug_open,
strncpy(rc->name, name, MIN(strlen(name), (DEBUG_MAX_PROCF_LEN - 1)));
rc->name[MIN(strlen(name), (DEBUG_MAX_PROCF_LEN - 1))] = 0;
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
+#ifdef CONFIG_PROC_FS
memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS *
sizeof(struct proc_dir_entry*));
+#endif /* CONFIG_PROC_FS */
atomic_set(&(rc->ref_count), 0);
return rc;
	/* create proc root directory */
-
rc->proc_root_entry = proc_mkdir(rc->name, debug_proc_root_entry);
/* append new element to linked list */
-
if (debug_area_first == NULL) {
/* first element in list */
debug_area_first = rc;
if (!db_info)
return;
if (atomic_dec_and_test(&db_info->ref_count)) {
+#ifdef DEBUG
printk(KERN_INFO "debug: freeing debug area %p (%s)\n",
db_info, db_info->name);
+#endif
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
- if (db_info->views[i] != NULL)
- debug_delete_proc_dir_entry
- (db_info->proc_root_entry,
- db_info->proc_entries[i]);
+ if (db_info->views[i] == NULL)
+ continue;
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(db_info->proc_entries[i]->name,
+ db_info->proc_root_entry);
+#endif
}
- debug_delete_proc_dir_entry(debug_proc_root_entry,
- db_info->proc_root_entry);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(db_info->proc_root_entry->name,
+ debug_proc_root_entry);
+#endif
if(db_info == debug_area_first)
debug_area_first = db_info->next;
if(db_info == debug_area_last)
#ifdef DEBUG
printk("debug_open\n");
#endif
+ MOD_INC_USE_COUNT;
down(&debug_lock);
/* find debug log and view */
debug_info_snapshot = debug_info_copy(debug_info);
if(!debug_info_snapshot){
+#ifdef DEBUG
printk(KERN_ERR "debug_open: debug_info_copy failed (out of mem)\n");
+#endif
rc = -ENOMEM;
goto out;
}
if ((file->private_data =
kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) {
+#ifdef DEBUG
printk(KERN_ERR "debug_open: kmalloc failed\n");
+#endif
debug_info_free(debug_info_snapshot);
rc = -ENOMEM;
goto out;
out:
up(&debug_lock);
+ if (rc != 0)
+ MOD_DEC_USE_COUNT;
return rc;
}
debug_info_free(p_info->debug_info_snap);
debug_info_put(p_info->debug_info_org);
kfree(file->private_data);
+ MOD_DEC_USE_COUNT;
return 0; /* success */
}
-/*
- * debug_create_proc_dir_entry:
- * - initializes proc-dir-entry and registers it
- */
-
-static struct proc_dir_entry *debug_create_proc_dir_entry
- (struct proc_dir_entry *root, const char *name, mode_t mode,
- struct file_operations *fops)
-{
- struct proc_dir_entry *rc = create_proc_entry(name, mode, root);
- if (rc && fops)
- rc->proc_fops = fops;
- return rc;
-}
-
-
-/*
- * delete_proc_dir_entry:
- */
-
-static void debug_delete_proc_dir_entry
- (struct proc_dir_entry *root, struct proc_dir_entry *proc_entry)
-{
- remove_proc_entry(proc_entry->name, root);
-}
-
/*
* debug_register:
* - creates and initializes debug area for the caller
goto out;
debug_register_view(rc, &debug_level_view);
debug_register_view(rc, &debug_flush_view);
+#ifdef DEBUG
printk(KERN_INFO
"debug: reserved %d areas of %d pages for debugging %s\n",
nr_areas, 1 << page_order, rc->name);
+#endif
out:
if (rc == NULL){
printk(KERN_ERR "debug: debug_register failed for %s\n",name);
if (!id)
goto out;
down(&debug_lock);
+#ifdef DEBUG
printk(KERN_INFO "debug: unregistering %s\n", id->name);
+#endif
debug_info_put(id);
up(&debug_lock);
down(&debug_lock);
if (!initialized) {
+#ifdef CONFIG_PROC_FS
debug_proc_root_entry = proc_mkdir(DEBUG_DIR_ROOT, NULL);
+#endif /* CONFIG_PROC_FS */
printk(KERN_INFO "debug: Initialization complete\n");
initialized = 1;
}
mode |= S_IRUSR;
if (view->input_proc)
mode |= S_IWUSR;
- id->proc_entries[i] =
- debug_create_proc_dir_entry(id->proc_root_entry,
- view->name, mode,
- &debug_file_ops);
+ id->proc_entries[i] = create_proc_entry(view->name, mode,
+ id->proc_root_entry);
+ if (id->proc_entries[i] != NULL)
+ id->proc_entries[i]->proc_fops = &debug_file_ops;
rc = 0;
}
spin_unlock_irqrestore(&id->lock, flags);
if (i == DEBUG_MAX_VIEWS)
rc = -1;
else {
- debug_delete_proc_dir_entry(id->proc_root_entry,
- id->proc_entries[i]);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(id->proc_entries[i]->name,
+ id->proc_root_entry);
+#endif
id->views[i] = NULL;
rc = 0;
}
#ifdef DEBUG
printk("debug_cleanup_module: \n");
#endif
- debug_delete_proc_dir_entry(NULL, debug_proc_root_entry);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(debug_proc_root_entry->name, NULL);
+#endif /* CONFIG_PROC_FS */
return;
}
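
The caller-visible interface of the debug facility is unchanged by the
proc handling rework above. A minimal usage sketch for a driver, modeled
on the debug_register()/debug_register_view() calls in this file (driver
name and sizes are made up for illustration):

	#include <asm/debug.h>

	static debug_info_t *my_dbf;

	static int __init my_debug_init(void)
	{
		/* 2^1 pages per area, 2 areas, 16 bytes data per entry */
		my_dbf = debug_register("mydriver", 1, 2, 16);
		if (my_dbf == NULL)
			return -ENOMEM;
		debug_register_view(my_dbf, &debug_hex_ascii_view);
		debug_text_event(my_dbf, 0, "init done");
		return 0;
	}

	static void __exit my_debug_exit(void)
	{
		debug_unregister(my_dbf);
	}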
#include <asm/errno.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+
+#include "asm-offsets.h"
/*
* Stack layout for the system_call stack entry.
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + PT_ORIGGPR2
/* Now the additional entries */
SP_TRAP = (SP_ORIG_R2+GPR_SIZE)
-#if CONFIG_REMOTE_DEBUG
-SP_CRREGS = (SP_TRAP+4)
-/* fpu registers are saved & restored by the gdb stub itself */
-SP_FPC = (SP_CRREGS+(NUM_CRS*CR_SIZE))
-SP_FPRS = (SP_FPC+FPC_SIZE+FPC_PAD_SIZE)
-SP_PGM_OLD_ILC= (SP_FPRS+(NUM_FPRS*FPR_SIZE))
-#else
-SP_PGM_OLD_ILC= (SP_TRAP+4)
-#endif
-SP_SIZE = (SP_PGM_OLD_ILC+4)
-/*
- * these defines are offsets into the thread_struct
- */
-_TSS_PTREGS = 0
-_TSS_FPRS = (_TSS_PTREGS+8)
-_TSS_AR2 = (_TSS_FPRS+136)
-_TSS_AR4 = (_TSS_AR2+4)
-_TSS_KSP = (_TSS_AR4+4)
-_TSS_USERSEG = (_TSS_KSP+4)
-_TSS_ERROR = (_TSS_USERSEG+4)
-_TSS_PROT = (_TSS_ERROR+4)
-_TSS_TRAP = (_TSS_PROT+4)
-_TSS_MM = (_TSS_TRAP+4)
-_TSS_PER = (_TSS_MM+8)
-_TSS_IEEE = (_TSS_PER+36)
-_TSS_FLAGS = (_TSS_IEEE+4)
+SP_SIZE = (SP_TRAP+4)
-/*
- * these are offsets into the task-struct.
- */
-state = 0
-flags = 4
-#error sigpending = 8
-#error need_resched = 24
-#error tsk_ptrace = 28
-processor = 56
+_TIF_WORK_MASK = (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
/*
* Base Address of this Module --- saved in __LC_ENTRY_BASE
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL psworg,sync # system entry macro
+ .macro SAVE_ALL_BASE
stm %r13,%r15,__LC_SAVE_AREA
basr %r13,0 # temp base pointer
- l %r13,.Lentry_base-.(%r13) # load &entry_base to %r13
+0: stam %a2,%a4,__LC_SAVE_AREA+12
+ l %r13,.Lentry_base-0b(%r13)# load &entry_base to %r13
+ .endm
+
+ .macro SAVE_ALL psworg,sync # system entry macro
tm \psworg+1,0x01 # test problem state bit
- stam %a2,%a4,__LC_SAVE_AREA+12
.if \sync
bz BASED(1f) # skip stack setup save
.else
lpsw __LC_RETURN_PSW # back to caller
.endm
- .macro GET_CURRENT
+ .macro GET_THREAD_INFO
l %r9,BASED(.Lc0xffffe000) # load pointer to task_struct to %r9
al %r9,__LC_KERNEL_STACK
.endm
/*
* Scheduler resume function, called by switch_to
- * grp2 = (thread_struct *) prev->tss
- * grp3 = (thread_struct *) next->tss
+ * gpr2 = (task_struct *) prev
+ * gpr3 = (task_struct *) next
* Returns:
* gpr2 = prev
*/
resume:
basr %r1,0
resume_base:
- l %r4,_TSS_PTREGS(%r3)
- tm SP_PSW-SP_PTREGS(%r4),0x40 # is the new process using per ?
- bz resume_noper-resume_base(%r1) # if not we're fine
- stctl %c9,%c11,24(%r15) # We are using per stuff
- clc _TSS_PER(12,%r3),24(%r15)
- be resume_noper-resume_base(%r1) # we got away w/o bashing TLB's
- lctl %c9,%c11,_TSS_PER(%r3) # Nope we didn't
+ tm __THREAD_per(%r3),0xe8 # new process is using per ?
+ bz resume_noper-resume_base(%r1) # if not we're fine
+ stctl %c9,%c11,24(%r15) # We are using per stuff
+ clc __THREAD_per(12,%r3),24(%r15)
+ be resume_noper-resume_base(%r1) # we got away w/o bashing TLB's
+ lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
resume_noper:
stm %r6,%r15,24(%r15) # store resume registers of prev task
- st %r15,_TSS_KSP(%r2) # store kernel stack ptr to prev->tss.ksp
- lr %r0,%r15
- n %r0,.Lc0xffffe000-resume_base(%r1)
- l %r15,_TSS_KSP(%r3) # load kernel stack ptr from next->tss.ksp
- l %r1,.Lc8191-resume_base(%r1)
- or %r1,%r15
- la %r1,1(%r1)
- st %r1,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
- stam %a2,%a2,_TSS_AR2(%r2) # store kernel access reg. 2
- stam %a4,%a4,_TSS_AR4(%r2) # store kernel access reg. 4
- lam %a2,%a2,_TSS_AR2(%r3) # load kernel access reg. 2
- lam %a4,%a4,_TSS_AR4(%r3) # load kernel access reg. 4
- lr %r2,%r0 # return task_struct of last task
- lm %r6,%r15,24(%r15) # load resume registers of next task
- br %r14
+ st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
+ l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
+ stam %a2,%a2,__THREAD_ar2(%r2) # store kernel access reg. 2
+ stam %a4,%a4,__THREAD_ar4(%r2) # store kernel access reg. 4
+ lam %a2,%a2,__THREAD_ar2(%r3) # load kernel access reg. 2
+ lam %a4,%a4,__THREAD_ar4(%r3) # load kernel access reg. 4
+ lm %r6,%r15,24(%r15) # load resume registers of next task
+ l %r3,__THREAD_info(%r3) # load thread_info from task struct
+ ahi %r3,8192
+ st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
+ br %r14
/*
* do_softirq calling function. We want to run the softirq functions on the
.globl system_call
system_call:
+ SAVE_ALL_BASE
SAVE_ALL __LC_SVC_OLD_PSW,1
- mvi SP_PGM_OLD_ILC(%r15),1 # mark PGM_OLD_ILC as invalid
-pgm_system_call:
- GET_CURRENT # load pointer to task_struct to R9
- slr %r8,%r8 # gpr 8 is call save (-> tracesys)
- ic %r8,0x8B # get svc number from lowcore
- stosm 24(%r15),0x03 # reenable interrupts
+ lh %r8,0x8a # get svc number from lowcore
sll %r8,2
- l %r8,sys_call_table-entry_base(8,%r13) # get address of system call
-#error tm tsk_ptrace+3(%r9),0x02 # PT_TRACESYS
- bnz BASED(sysc_tracesys)
+ GET_THREAD_INFO # load pointer to task_struct to R9
+ stosm 24(%r15),0x03 # reenable interrupts
+ l %r8,sys_call_table-entry_base(%r8,%r13) # get system call addr.
+ tm __TI_flags+3(%r9),_TIF_SYSCALL_TRACE
+ bo BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
# ATTENTION: check sys_execve_glue before
# changing anything here !!
sysc_return:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- bno BASED(sysc_leave) # no-> skip resched & signal
-#
-# check, if reschedule is needed
-#
-#error icm %r0,15,need_resched(%r9) # get need_resched from task_struct
- bnz BASED(sysc_reschedule)
-#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
- bnz BASED(sysc_signal_return)
+ stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+ tm __TI_flags+3(%r9),_TIF_WORK_MASK
+ bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_leave:
- tm SP_PGM_OLD_ILC(%r15),0xff
- bz BASED(pgm_svcret)
- stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
RESTORE_ALL 1
#
+# One of the work bits _TIF_NOTIFY_RESUME, _TIF_SIGPENDING or
+# _TIF_NEED_RESCHED is on. Find out which one.
+#
+sysc_work:
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
+ bno BASED(sysc_leave) # no-> skip resched & signal
+ tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
+ bo BASED(sysc_reschedule)
+ # add a test for TIF_NOTIFY_RESUME here when it is used.
+ # _TIF_SIGPENDING is the only flag left
+#
# call do_signal before return
#
sysc_signal_return:
la %r2,SP_PTREGS(%r15) # load pt_regs
sr %r3,%r3 # clear *oldset
l %r1,BASED(.Ldo_signal)
- la %r14,BASED(sysc_leave)
- br %r1 # return point is sysc_leave
-
-#
-# call trace before and after sys_call
-#
-sysc_tracesys:
- l %r1,BASED(.Ltrace)
- l %r7,BASED(.Lc_ENOSYS)
- st %r7,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
- basr %r14,%r1
- l %r2,SP_R2(%r15)
- cr %r2,%r7 # compare with saved -ENOSYS
- be BASED(sysc_tracesys_dn1)
- # strace wants to change the syscall
- sll %r2,24
- srl %r2,22
- l %r8,sys_call_table-entry_base(2,%r13) # get address of system call
-sysc_tracesys_dn1:
- lm %r3,%r6,SP_R3(%r15)
- l %r2,SP_ORIG_R2(%r15)
- basr %r14,%r8 # call sys_xxx
- st %r2,SP_R2(%r15) # store return value
- l %r1,BASED(.Ltrace)
la %r14,BASED(sysc_return)
br %r1 # return point is sysc_return
-
#
# call schedule with sysc_return as return-address
#
la %r14,BASED(sysc_return)
br %r1 # call scheduler, return to sysc_return
+#
+# call trace before and after sys_call
+#
+sysc_tracesys:
+ l %r1,BASED(.Ltrace)
+ mvc SP_R2(4,%r15),BASED(.Lc_ENOSYS)
+ basr %r14,%r1
+ clc SP_R2(4,%r15),BASED(.Lc256)
+ bnl BASED(sysc_tracego)
+ l %r8,SP_R2(%r15) # strace changed the syscall
+ sll %r8,2
+ l %r8,sys_call_table-entry_base(%r8,%r13)
+sysc_tracego:
+ lm %r3,%r6,SP_R3(%r15)
+ l %r2,SP_ORIG_R2(%r15)
+ basr %r14,%r8 # call sys_xxx
+ st %r2,SP_R2(%r15) # store return value
+ tm __TI_flags+3(%r9),_TIF_SYSCALL_TRACE
+ bno BASED(sysc_return)
+ l %r1,BASED(.Ltrace)
+ la %r14,BASED(sysc_return)
+ br %r1
+
#
# a new process exits the kernel with ret_from_fork
#
ret_from_fork:
basr %r13,0
l %r13,.Lentry_base-.(%r13) # setup base pointer to &entry_base
- GET_CURRENT # load pointer to task_struct to R9
- stosm 24(%r15),0x03 # reenable interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
sr %r0,%r0 # child returns 0
st %r0,SP_R2(%r15) # store return value (change R2 on stack)
-#if CONFIG_SMP || CONFIG_PREEMPT
+#ifdef CONFIG_SMP
l %r1,BASED(.Lschedtail)
la %r14,BASED(sysc_return)
br %r1 # call schedule_tail, return to sysc_return
#else
- b BASED(sysc_return)
+ b BASED(sysc_return)
#endif
#
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
- .long sys_ni_syscall /* 222 - reserved for posix_acl */
- .long sys_ni_syscall /* 223 - reserved for posix_acl */
- .long sys_ni_syscall /* 224 - reserved for posix_acl */
- .rept 255-224
+ .long sys_readahead
+ .long sys_sendfile64
+ .long sys_setxattr
+ .long sys_lsetxattr /* 225 */
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr /* 230 */
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr /* 235 */
+ .long sys_gettid
+ .long sys_tkill
+ .long sys_futex
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity /* 240 */
+ .rept 255-240
.long sys_ni_syscall
- .long sys_gettid /* 226 */
- .long sys_tkill /* 227 */
.endr
/*
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- stm %r13,%r15,__LC_SAVE_AREA
- basr %r13,0 # temp base pointer
- l %r13,.Lentry_base-.(%r13)# load &entry_base to %r13
+ SAVE_ALL_BASE
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
- stam %a2,%a4,__LC_SAVE_AREA+12
- bz BASED(pgm_sv) # skip if not
+ bnz BASED(pgm_per) # got per exception -> special case
+ SAVE_ALL __LC_PGM_OLD_PSW,1
+ l %r7,BASED(.Ljump_table)
+ lh %r8,__LC_PGM_INT_CODE
+ sll %r8,2
+ GET_THREAD_INFO
+ l %r7,0(%r8,%r7) # load address of handler routine
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ l %r3,__LC_PGM_ILC # load program interruption code
+ la %r14,BASED(sysc_return)
+ br %r7 # branch to interrupt-handler
+
+#
+# handle per exception
+#
+pgm_per:
tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
- bnz BASED(pgm_sv) # skip if it is
+ bnz BASED(pgm_per_std) # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
be BASED(pgm_svcper)
# no interesting special case, ignore PER event
lm %r13,%r15,__LC_SAVE_AREA
lpsw 0x28
+
+#
+# Normal per exception
+#
+pgm_per_std:
+ SAVE_ALL __LC_PGM_OLD_PSW,1
+ GET_THREAD_INFO
+ la %r4,0x7f
+ l %r3,__LC_PGM_ILC # load program interruption code
+ nr %r4,%r3 # clear per-event-bit and ilc
+ be BASED(pgm_per_only) # only per or per+check ?
+ l %r1,BASED(.Ljump_table)
+ sll %r4,2
+ l %r1,0(%r4,%r1) # load address of handler routine
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ basr %r14,%r1 # branch to interrupt-handler
+pgm_per_only:
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+ l %r1,BASED(.Lhandle_per) # load adr. of per handler
+ la %r14,BASED(sysc_return) # load adr. of system return
+ br %r1 # branch to handle_per_exception
+
+#
# it was a single stepped SVC that is causing all the trouble
+#
pgm_svcper:
- tm 0x21,0x01 # test problem state bit
- bz BASED(.+12) # skip stack & access regs setup
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
- lam %a2,%a4,BASED(.Lc_ac) # set ac.reg. 2 to primary space
- # and access reg. 4 to home space
- s %r15,BASED(.Lc_spsize) # make room for registers & psw
- n %r15,BASED(.Lc0xfffffff8) # align stack pointer to 8
- stm %r0,%r12,SP_R0(%r15) # store gprs 0-12 to kernel stack
- st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- mvc SP_R13(12,%r15),__LC_SAVE_AREA # move R13-R15 to stack
- stam %a0,%a15,SP_AREGS(%r15) # store access registers to kst.
- mvc SP_AREGS+8(12,%r15),__LC_SAVE_AREA+12 # store ac. regs
- mvc SP_PSW(8,%r15),0x20 # move user PSW to stack
- la %r0,0x20 # store trap indication
- st %r0,SP_TRAP(%r15)
- xc 0(4,%r15),0(%r15) # clear back chain
- mvc SP_PGM_OLD_ILC(4,%r15),__LC_PGM_ILC # save program check information
- b BASED(pgm_system_call) # now do the svc
-pgm_svcret:
- mvi SP_TRAP+3(%r15),0x28 # set trap indication back to pgm_chk
- lh %r7,SP_PGM_OLD_ILC(%r15) # get ilc from stack
- mvi SP_PGM_OLD_ILC(%r15),1 # mark PGM_OLD_ILC as invalid
- b BASED(pgm_no_sv)
-pgm_sv:
- tm 0x29,0x01 # test problem state bit
- bz BASED(.+12) # skip stack & access regs setup
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
- lam %a2,%a4,BASED(.Lc_ac) # set ac.reg. 2 to primary space
- # and access reg. 4 to home space
- s %r15,BASED(.Lc_spsize) # make room for registers & psw
- n %r15,BASED(.Lc0xfffffff8) # align stack pointer to 8
- stm %r0,%r12,SP_R0(%r15) # store gprs 0-12 to kernel stack
- st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- mvc SP_R13(12,%r15),__LC_SAVE_AREA # move R13-R15 to stack
- stam %a0,%a15,SP_AREGS(%r15) # store access registers to kst.
- mvc SP_AREGS+8(12,%r15),__LC_SAVE_AREA+12 # store ac. regs
- mvc SP_PSW(8,%r15),0x28 # move user PSW to stack
- la %r0,0x28 # store trap indication
- st %r0,SP_TRAP(%r15)
- xc 0(4,%r15),0(%r15) # clear back chain
- mvi SP_PGM_OLD_ILC(%r15),1 # mark PGM_OLD_ILC as invalid
- lh %r7,__LC_PGM_ILC # load instruction length
- GET_CURRENT
-pgm_no_sv:
- la %r3,0x7f
- lh %r8,__LC_PGM_INT_CODE # N.B. saved int code used later KEEP it
- nr %r3,%r8 # reload & clear per-event-bit
- be BASED(pgm_dn) # none of Martins exceptions occurred bypass
- l %r1,BASED(.Ljump_table)
- sll %r3,2
- l %r1,0(%r3,%r1) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
- srl %r3,2
- cl %r3,BASED(.Lc4) # protection-exception ?
- bne BASED(pgm_per) # if not,
- l %r5,SP_PSW+4(15) # load psw addr
- sr %r5,%r7 # substract ilc from psw
- st %r5,SP_PSW+4(15) # store corrected psw addr
-pgm_per:basr %r14,%r1 # branch to interrupt-handler
-pgm_dn: n %r8,BASED(.Lc128) # check for per excepton
- be BASED(pgm_return)
- la %r2,SP_PTREGS(15) # address of register-save area
- l %r1,BASED(.Lhandle_per) # load adr. of per handler
- la %r14,BASED(sysc_return) # load adr. of system return
- br %r1 # branch to handle_per_exception
+ SAVE_ALL __LC_SVC_OLD_PSW,1
+ GET_THREAD_INFO # load pointer to task_struct to R9
+ lh %r8,0x8a # get svc number from lowcore
+ sll %r8,2
+ stosm 24(%r15),0x03 # reenable interrupts
+ l %r8,sys_call_table-entry_base(%r8,%r13) # get system call addr.
+ tm __TI_flags+3(%r9),_TIF_SYSCALL_TRACE
+ bo BASED(pgm_tracesys)
+ basr %r14,%r8 # call sys_xxxx
+ st %r2,SP_R2(%r15) # store return value (change R2 on stack)
+ # ATTENTION: check sys_execve_glue before
+ # changing anything here !!
+pgm_svcret:
+ tm __TI_flags+3(%r9),_TIF_SIGPENDING
+	bno	BASED(pgm_svcper_nosig)
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ sr %r3,%r3 # clear *oldset
+ l %r1,BASED(.Ldo_signal)
+	basr	%r14,%r1		# call do_signal
+
+pgm_svcper_nosig:
+ mvi SP_TRAP+3(%r15),0x28 # set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+ l %r1,BASED(.Lhandle_per) # load adr. of per handler
+ la %r14,BASED(sysc_return) # load adr. of system return
+ br %r1 # branch to handle_per_exception
#
-# the backend code is the same as for sys-call
+# call trace before and after sys_call
#
-pgm_return:
- b BASED(sysc_return)
+pgm_tracesys:
+ l %r1,BASED(.Ltrace)
+	mvc	SP_R2(4,%r15),BASED(.Lc_ENOSYS)
+ basr %r14,%r1
+ clc SP_R2(4,%r15),BASED(.Lc256)
+ bnl BASED(pgm_svc_go)
+ l %r8,SP_R2(%r15) # strace changed the syscall
+ sll %r8,2
+ l %r8,sys_call_table-entry_base(%r8,%r13)
+pgm_svc_go:
+ lm %r3,%r6,SP_R3(%r15)
+ l %r2,SP_ORIG_R2(%r15)
+ basr %r14,%r8 # call sys_xxx
+ st %r2,SP_R2(%r15) # store return value
+ tm __TI_flags+3(%r9),_TIF_SYSCALL_TRACE
+ bno BASED(pgm_svcret)
+ l %r1,BASED(.Ltrace)
+ la %r14,BASED(pgm_svcret)
+ br %r1
/*
* IO interrupt handler routine
.globl io_int_handler
io_int_handler:
+ SAVE_ALL_BASE
SAVE_ALL __LC_IO_OLD_PSW,0
- GET_CURRENT # load pointer to task_struct to R9
+ GET_THREAD_INFO # load pointer to task_struct to R9
+ l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
sr %r3,%r3
- icm %r3,%r3,__LC_SUBCHANNEL_NR # load subchannel nr & extend to int
+ icm %r3,3,__LC_SUBCHANNEL_NR # load subchannel nr & extend to int
	l	%r4,__LC_IO_INT_PARM	 # load interruption parm
	l	%r5,__LC_IO_INT_WORD	 # load interruption word
- l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
basr %r14,%r1 # branch to standard irq handler
io_return:
-#
+#
# check, if bottom-half has to be done
#
- l %r1,processor(%r9) # get cpu number from task struture
+ l %r1,__TI_cpu(%r9)
sll %r1,L1_CACHE_SHIFT
- al %r1,BASED(.Lirq_stat) # get address of irq_stat
- icm %r0,15,0(%r1) # test irq_stat[#cpu].__softirq_pending
+ al %r1,BASED(.Lirq_stat) # get address of irq_stat
+ icm %r0,15,0(%r1) # test irq_stat[#cpu].__softirq_pending
bnz BASED(io_handle_bottom_half)
io_return_bh:
-
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- bno BASED(io_leave) # no-> skip resched & signal
- stosm 24(%r15),0x03 # reenable interrupts
-#
-# check, if reschedule is needed
-#
-#error icm %r0,15,need_resched(%r9) # get need_resched from task_struct
- bnz BASED(io_reschedule)
-#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
- bnz BASED(io_signal_return)
+ tm __TI_flags+3(%r9),_TIF_WORK_MASK
+ bnz BASED(io_work) # there is work to do (signals etc.)
io_leave:
- stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+ stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
RESTORE_ALL 0
#
br %r1 # call do_softirq
#
-# call schedule with io_return as return-address
+# One of the work bits _TIF_NOTIFY_RESUME, _TIF_SIGPENDING or
+# _TIF_NEED_RESCHED is on. Find out which one.
#
-io_reschedule:
- l %r1,BASED(.Lschedule)
- la %r14,BASED(io_return)
- br %r1 # call scheduler, return to io_return
+io_work:
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
+ bno BASED(io_leave) # no-> skip resched & signal
+ stosm 24(%r15),0x03 # reenable interrupts
+ tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
+ bo BASED(io_reschedule)
+ # add a test for TIF_NOTIFY_RESUME here when it is used.
+ # _TIF_SIGPENDING is the only flag left
#
# call do_signal before return
la %r14,BASED(io_leave)
br %r1 # return point is io_leave
+#
+# call schedule with io_return as return-address
+#
+io_reschedule:
+ l %r1,BASED(.Lschedule)
+ la %r14,BASED(io_return)
+ br %r1 # call scheduler, return to io_return
+
/*
* External interrupt handler routine
*/
.globl ext_int_handler
ext_int_handler:
+ SAVE_ALL_BASE
SAVE_ALL __LC_EXT_OLD_PSW,0
- GET_CURRENT # load pointer to task_struct to R9
- la %r2,SP_PTREGS(%r15) # address of register-save area
- lh %r3,__LC_EXT_INT_CODE # error code
- lr %r1,%r3 # calculate index = code & 0xff
- n %r1,BASED(.Lc0xff)
- sll %r1,2
- l %r4,BASED(.Lext_hash)
- l %r4,0(%r1,%r4) # get first list entry for hash value
- ltr %r4,%r4 # == NULL ?
- bz BASED(io_return) # yes, nothing to do, exit
+ GET_THREAD_INFO # load pointer to task_struct to R9
+ lh %r6,__LC_EXT_INT_CODE # get interruption code
+ lr %r1,%r6 # calculate index = code & 0xff
+ n %r1,BASED(.Lc0xff)
+ sll %r1,2
+ l %r7,BASED(.Lext_hash)
+ l %r7,0(%r1,%r7) # get first list entry for hash value
+ ltr %r7,%r7 # == NULL ?
+ bz BASED(io_return) # yes, nothing to do, exit
ext_int_loop:
- ch %r3,8(%r4) # compare external interrupt code
- be BASED(ext_int_found)
- icm %r4,15,0(%r4) # next list entry
- bnz BASED(ext_int_loop)
- b BASED(io_return)
-ext_int_found:
- l %r4,4(%r4) # get handler address
- la %r14,BASED(io_return)
- br %r4 # branch to ext call handler
+ ch %r6,8(%r7) # compare external interrupt code
+ bne BASED(ext_int_next)
+ l %r1,4(%r7) # get handler address
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ lr %r3,%r6 # interruption code
+ basr %r14,%r1 # call handler
+ext_int_next:
+ icm %r7,15,0(%r7) # next list entry
+ bnz BASED(ext_int_loop)
+ b BASED(io_return)
/*
* Machine check handler routines
.globl mcck_int_handler
mcck_int_handler:
+ SAVE_ALL_BASE
SAVE_ALL __LC_MCK_OLD_PSW,0
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
.Lc0x4000: .long 0x4000
.Lc0xff: .long 0xff
.Lc128: .long 128
+.Lc256: .long 256
/*
* Symbol constants
*/
.Ls390_mcck: .long s390_do_machine_check
.Ldo_IRQ: .long do_IRQ
-#error .Ldo_signal: .long do_signal
+.Ldo_signal: .long do_signal
.Ldo_softirq: .long do_softirq
.Lentry_base: .long entry_base
.Lext_hash: .long ext_int_hash
.Lsigreturn: .long sys_sigreturn
.Lsigsuspend: .long sys_sigsuspend
.Lsigaltstack: .long sys_sigaltstack
-#error .Ltrace: .long syscall_trace
+.Ltrace: .long syscall_trace
.Lvfork: .long sys_vfork
-
-#if CONFIG_SMP || CONFIG_PREEMPT
+#ifdef CONFIG_SMP
.Lschedtail: .long schedule_tail
#endif
+
#
.align 8
.Ldw: .long 0x000a0000,0x00000000
-.Linittu: .long init_task_union
+.Linittu: .long init_thread_union
.Lstart: .long start_kernel
.Lbss_bgn: .long __bss_start
.Lbss_end: .long _end
+++ /dev/null
-/*
- * arch/s390/kernel/ieee.h
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- */
-
-#include <linux/sched.h>
-
-static inline void _adddf(int R1,int R2)
-{
- current->tss.fprs[R1].fd = current->tss.fprs[R1].fd +
- current->tss.fprs[R2].fd;
-}
-
-static inline void _subdf(int R1,int R2)
-{
- current->tss.fprs[R1].fd = current->tss.fprs[R1].fd -
- current->tss.fprs[R2].fd;
-}
-
-static inline void _muldf(int R1,int R2)
-{
- current->tss.fprs[R1].fd = current->tss.fprs[R1].fd *
- current->tss.fprs[R2].fd;
-}
-
-static inline void _divdf(int R1,int R2)
-{
- current->tss.fprs[R1].fd = current->tss.fprs[R1].fd /
- current->tss.fprs[R2].fd;
-}
-
-static inline void _negdf(int R1,int R2)
-{
- current->tss.fprs[R1].fd = -current->tss.fprs[R1].fd;
-}
-
-static inline void _fixdfsi(int R1,int R2)
-{
- current->tss.regs->gprs[R1] = (__u32) current->tss.fprs[R2].fd;
-}
-
-static inline void _extendsidf(int R1,int R2)
-{
- current->tss.fprs[R1].fd = (double) current->tss.regs->gprs[R2];
-}
-
-
-static inline void _addsf(int R1,int R2)
-{
- current->tss.fprs[R1].ff = current->tss.fprs[R1].ff +
- current->tss.fprs[R2].ff;
-}
-
-static inline void _subsf(int R1,int R2)
-{
- current->tss.fprs[R1].ff = current->tss.fprs[R1].ff -
- current->tss.fprs[R2].ff;
-}
-
-static inline void _mulsf(int R1,int R2)
-{
- current->tss.fprs[R1].ff = current->tss.fprs[R1].ff *
- current->tss.fprs[R2].ff;
-}
-
-static inline void _divsf(int R1,int R2)
-{
- current->tss.fprs[R1].ff = current->tss.fprs[R1].ff /
- current->tss.fprs[R2].ff;
-}
-
-static inline void _negsf(int R1,int R2)
-{
- current->tss.fprs[R1].ff = -current->tss.fprs[R1].ff;
-}
-
-static inline void _fixsfsi(int R1,int R2)
-{
- current->tss.regs->gprs[R1] = (__u32) current->tss.fprs[R2].ff;
-}
-
-static inline void _extendsisf(int R1,int R2)
-{
- current->tss.fprs[R1].ff = (double) current->tss.regs->gprs[R2];
-}
-
-
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
/*
- * Initial task structure.
+ * Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by making sure
- * the linker maps this in the .text segment right after head.S,
- * and making head.S ensure the proper alignment.
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
*
- * The things we do for performance..
+ * All other task structs will be allocated on slabs in fork.c
*/
-union task_union init_task_union __attribute__((aligned(8192))) =
- { INIT_TASK(init_task_union.task) };
+struct task_struct init_task = INIT_TASK(init_task);
+
int show_interrupts(struct seq_file *p, void *v)
{
int i, j;
- struct irqaction * action;
seq_puts(p, " ");
if (ioinfo[i] == INVALID_STORAGE_AREA)
continue;
- action = ioinfo[i]->irq_desc.action;
-
- if (!action)
- continue;
-
seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for (j=0; j<smp_num_cpus; j++)
- seq_printf( p, "%10u ",
- kstat.irqs[cpu_logical_map(j)][i]);
-#endif
- seq_printf(p, " %14s", ioinfo[i]->irq_desc.handler->typename);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- {
- seq_printf(p, ", %s", action->name);
-
- } /* endfor */
+ seq_printf(p, " %s", ioinfo[i]->irq_desc.name);
seq_putc(p, '\n');
} /* endfor */
- seq_printf(p, "NMI: %10u\n", nmi_counter);
-#ifdef CONFIG_SMP
- seq_printf(p, "IPI: %10u\n", atomic_read(&ipi_count));
-#endif
return 0;
}
*/
#define check_smp_invalidate(cpu)
+extern void show_stack(unsigned long* esp);
+
static void show(char * str)
{
- int i;
- unsigned long *stack;
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
atomic_read(&global_irq_count),local_irq_count(smp_processor_id()));
printk("bh: %d [%d]\n",
atomic_read(&global_bh_count),local_bh_count(smp_processor_id()));
- stack = (unsigned long *) &str;
- for (i = 40; i ; i--) {
- unsigned long x = *++stack;
- if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
- printk("<[%08lx]> ", x);
- }
- }
+ show_stack(NULL);
}
#define MAXCOUNT 100000000
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
/*
- * The idle loop on a S390...
+ * Return saved PC of a blocked thread. Used in kernel/sched.c.
+ * resume in entry.S does not create a new stack frame, it
+ * just stores the registers %r6-%r15 to the frame given by
+ * schedule. We want to return the address of the caller of
+ * schedule, so we have to walk the backchain once to find the
+ * frame in which schedule() stored its return address (%r14 lies
+ * at offset 56 = 24 + 8*4 of the register save area).
*/
-
-static psw_t wait_psw;
-
-int cpu_idle(void *unused)
+unsigned long thread_saved_pc(struct task_struct *tsk)
{
- /* endless idle loop with no priority at all */
- init_idle();
- current->nice = 20;
- wait_psw.mask = _WAIT_PSW_MASK;
- wait_psw.addr = (unsigned long) &&idle_wakeup | 0x80000000L;
- while(1) {
- if (need_resched()) {
- schedule();
- check_pgt_cache();
- continue;
- }
-
- /* load wait psw */
- asm volatile (
- "lpsw %0"
- : : "m" (wait_psw) );
-idle_wakeup:
- }
+ unsigned long bc;
+
+ bc = *((unsigned long *) tsk->thread.ksp);
+ return *((unsigned long *) (bc+56));
}
/*
- As all the register will only be made displayable to the root
- user ( via printk ) or checking if the uid of the user is 0 from
- the /proc filesystem please god this will be secure enough DJB.
- The lines are given one at a time so as not to chew stack space in
- printk on a crash & also for the proc filesystem when you get
- 0 returned you know you've got all the lines
+ * The idle loop on a S390...
*/
-static int sprintf_regs(int line, char *buff, struct task_struct *task, struct pt_regs *regs)
+void default_idle(void)
{
- int linelen=0;
- int regno,chaincnt;
- u32 backchain,prev_backchain,endchain;
- u32 ksp = 0;
- char *mode = "???";
+ psw_t wait_psw;
+ unsigned long reg;
+
+ if (need_resched()) {
+ schedule();
+ return;
+ }
+
+ /*
+ * Wait for external, I/O or machine check interrupt and
+	 * switch off the machine check bit after the wait has ended.
+ */
+ wait_psw.mask = _WAIT_PSW_MASK;
+ asm volatile (
+ " basr %0,0\n"
+ "0: la %0,1f-0b(%0)\n"
+ " st %0,4(%1)\n"
+ " oi 4(%1),0x80\n"
+ " lpsw 0(%1)\n"
+ "1: la %0,2f-1b(%0)\n"
+ " st %0,4(%1)\n"
+ " oi 4(%1),0x80\n"
+ " ni 1(%1),0xf9\n"
+ " lpsw 0(%1)\n"
+ "2:"
+ : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+}
- enum
- {
- sp_linefeed,
- sp_psw,
- sp_ksp,
- sp_gprs,
- sp_gprs1,
- sp_gprs2,
- sp_gprs3,
- sp_gprs4,
- sp_acrs,
- sp_acrs1,
- sp_acrs2,
- sp_acrs3,
- sp_acrs4,
- sp_kern_backchain,
- sp_kern_backchain1
- };
-
- if (task)
- ksp = task->thread.ksp;
- if (regs && !(regs->psw.mask & PSW_PROBLEM_STATE))
- ksp = regs->gprs[15];
-
- if (regs)
- mode = (regs->psw.mask & PSW_PROBLEM_STATE)?
- "User" : "Kernel";
-
- switch(line)
- {
- case sp_linefeed:
- linelen=sprintf(buff,"\n");
- break;
- case sp_psw:
- if(regs)
- linelen=sprintf(buff, "%s PSW: %08lx %08lx %s\n", mode,
- (unsigned long) regs->psw.mask,
- (unsigned long) regs->psw.addr,
- print_tainted());
- else
- linelen=sprintf(buff,"pt_regs=NULL some info unavailable\n");
- break;
- case sp_ksp:
- linelen=sprintf(&buff[linelen],
- "task: %08x ksp: %08x pt_regs: %08x\n",
- (addr_t)task, (addr_t)ksp, (addr_t)regs);
- break;
- case sp_gprs:
- if(regs)
- linelen=sprintf(buff, "%s GPRS:\n", mode);
- break;
- case sp_gprs1 ... sp_gprs4:
- if(regs)
- {
- regno=(line-sp_gprs1)*4;
- linelen=sprintf(buff,"%08x %08x %08x %08x\n",
- regs->gprs[regno],
- regs->gprs[regno+1],
- regs->gprs[regno+2],
- regs->gprs[regno+3]);
- }
- break;
- case sp_acrs:
- if(regs)
- linelen=sprintf(buff, "%s ACRS:\n", mode);
- break;
- case sp_acrs1 ... sp_acrs4:
- if(regs)
- {
- regno=(line-sp_acrs1)*4;
- linelen=sprintf(buff,"%08x %08x %08x %08x\n",
- regs->acrs[regno],
- regs->acrs[regno+1],
- regs->acrs[regno+2],
- regs->acrs[regno+3]);
- }
- break;
- case sp_kern_backchain:
- if (regs && (regs->psw.mask & PSW_PROBLEM_STATE))
- break;
- if (ksp)
- linelen=sprintf(buff, "Kernel BackChain CallChain\n");
- break;
- default:
- if (ksp)
- {
-
- backchain=ksp&PSW_ADDR_MASK;
- endchain=((backchain&(-8192))+8192);
- prev_backchain=backchain-1;
- line-=sp_kern_backchain1;
- for(chaincnt=0;;chaincnt++)
- {
- if((backchain==0)||(backchain>=endchain)
- ||(chaincnt>=8)||(prev_backchain>=backchain))
- break;
- if(chaincnt==line)
- {
- linelen+=sprintf(&buff[linelen]," %08x [<%08lx>]\n",
- backchain,
- *(u32 *)(backchain+56)&PSW_ADDR_MASK);
- break;
- }
- prev_backchain=backchain;
- backchain=(*((u32 *)backchain))&PSW_ADDR_MASK;
- }
- }
- }
- return(linelen);
+int cpu_idle(void)
+{
+ for (;;)
+ default_idle();
+ return 0;
}
+extern void show_registers(struct pt_regs *regs);
+extern void show_trace(unsigned long *sp);
void show_regs(struct pt_regs *regs)
{
- char buff[80];
- int i, line;
-
- printk("CPU: %d\n",smp_processor_id());
- printk("Process %s (pid: %d, stackpage=%08X)\n",
- current->comm, current->pid, 4096+(addr_t)current);
-
- for (line = 0; sprintf_regs(line, buff, current, regs); line++)
- printk(buff);
-
- if (regs->psw.mask & PSW_PROBLEM_STATE)
- {
- printk("User Code:\n");
- memset(buff, 0, 20);
- copy_from_user(buff,
- (char *) (regs->psw.addr & PSW_ADDR_MASK), 20);
- for (i = 0; i < 20; i++)
- printk("%02x ", buff[i]);
- printk("\n");
- }
-}
+ struct task_struct *tsk = current;
-char *task_show_regs(struct task_struct *task, char *buffer)
-{
- int line, len;
+ printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted());
+ printk("Process %s (pid: %d, task: %08lx, ksp: %08x)\n",
+ current->comm, current->pid, (unsigned long) tsk,
+ tsk->thread.ksp);
- for (line = 0; ; line++)
- {
- len = sprintf_regs(line, buffer, task, task->thread.regs);
- if (!len) break;
- buffer += len;
- }
- return buffer;
+ show_registers(regs);
+ /* Show stack backtrace if pt_regs is from kernel mode */
+ if (!(regs->psw.mask & PSW_PROBLEM_STATE))
+ show_trace((unsigned long *) regs->gprs[15]);
}
-
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
int clone_arg = flags | CLONE_VM;
{
current->used_math = 0;
- current->flags &= ~PF_USEDFPU;
+ clear_tsk_thread_flag(current, TIF_USEDFPU);
}
void release_thread(struct task_struct *dead_task)
unsigned long gprs[10]; /* gprs 6 -15 */
unsigned long fprs[4]; /* fpr 4 and 6 */
unsigned long empty[4];
-#if CONFIG_REMOTE_DEBUG
- struct gdb_pt_regs childregs;
-#else
struct pt_regs childregs;
-#endif
} *frame;
- frame = (struct stack_frame *) (2*PAGE_SIZE + (unsigned long) p) -1;
- frame = (struct stack_frame *) (((unsigned long) frame)&-8L);
- p->thread.regs = (struct pt_regs *)&frame->childregs;
+ frame = ((struct stack_frame *)
+ (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
p->thread.ksp = (unsigned long) frame;
memcpy(&frame->childregs,regs,sizeof(struct pt_regs));
frame->childregs.gprs[15] = new_stackp;
/* new return point is ret_from_sys_call */
frame->gprs[8] = ((unsigned long) &ret_from_fork) | 0x80000000;
+ /* start disabled because of schedule_tick and rq->lock being held */
+ frame->childregs.psw.mask &= ~0x03000000;
/* fake return stack for resume(), don't go back to schedule */
frame->gprs[9] = (unsigned long) frame;
- frame->childregs.old_ilc = -1; /* We are not single stepping an svc */
/* save fprs, if used in last task */
save_fp_regs(&p->thread.fp_regs);
p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
- p->thread.fs = USER_DS;
/* Don't copy debug registers */
memset(&p->thread.per_info,0,sizeof(p->thread.per_info));
return 0;
void FixPerRegisters(struct task_struct *task)
{
- struct pt_regs *regs = task->thread.regs;
+ struct pt_regs *regs = __KSTK_PTREGS(task);
per_struct *per_info=
(per_struct *)&task->thread.per_info;
mask=0xffffffff;
if(useraddr<PT_FPC)
{
- realuseraddr=(addr_t)&(((u8 *)task->thread.regs)[useraddr]);
+ realuseraddr=((addr_t) __KSTK_PTREGS(task)) + useraddr;
if(useraddr<PT_PSWMASK)
{
copymax=PT_PSWMASK;
{
struct task_struct *child;
int ret = -EPERM;
- unsigned long flags;
unsigned long tmp;
int copied;
ptrace_area parea;
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* you may not mess with init */
- goto out;
+ goto out_tsk;
if (request == PTRACE_ATTACH)
{
ret = ptrace_attach(child);
- goto out;
+ goto out_tsk;
}
ret = -ESRCH;
// printk("child=%lX child->flags=%lX",child,child->flags);
if(child!=current)
{
if (!(child->ptrace & PT_PTRACED))
- goto out;
+ goto out_tsk;
if (child->state != TASK_STOPPED)
{
if (request != PTRACE_KILL)
- goto out;
+ goto out_tsk;
}
- if (child->p_pptr != current)
- goto out;
+ if (child->parent != current)
+ goto out_tsk;
}
switch (request)
{
copied = access_process_vm(child,ADDR_BITS_REMOVE(addr), &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
- goto out;
+ break;
ret = put_user(tmp,(unsigned long *) data);
- goto out;
+ break;
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
case PTRACE_POKEDATA:
ret = 0;
if (access_process_vm(child,ADDR_BITS_REMOVE(addr), &data, sizeof(data), 1) == sizeof(data))
- goto out;
+ break;
ret = -EIO;
- goto out;
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
if ((unsigned long) data >= _NSIG)
break;
if (request == PTRACE_SYSCALL)
- child->ptrace |= PT_TRACESYS;
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
- child->ptrace &= ~PT_TRACESYS;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* make sure the single step bit is not set. */
clear_single_step(child);
ret = -EIO;
if ((unsigned long) data >= _NSIG)
break;
- child->ptrace &= ~PT_TRACESYS;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
set_single_step(child);
/* give it a chance to run. */
ret=copy_user(child,parea.kernel_addr,parea.process_addr,
parea.len,1,(request==PTRACE_POKEUSR_AREA));
break;
+ case PTRACE_SETOPTIONS: {
+ if (data & PTRACE_O_TRACESYSGOOD)
+ child->ptrace |= PT_TRACESYSGOOD;
+ else
+ child->ptrace &= ~PT_TRACESYSGOOD;
+ ret = 0;
+ break;
+ }
default:
ret = -EIO;
break;
}
+ out_tsk:
+ put_task_struct(child);
out:
unlock_kernel();
return ret;
asmlinkage void syscall_trace(void)
{
- lock_kernel();
- if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
- != (PT_PTRACED|PT_TRACESYS))
- goto out;
- current->exit_code = SIGTRAP;
- set_current_state(TASK_STOPPED);
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0);
+ current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
/*
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
- out:
- unlock_kernel();
}
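With TIF_SYSCALL_TRACE and PT_TRACESYSGOOD wired up as above, a tracer that
sets PTRACE_O_TRACESYSGOOD can tell syscall stops from genuine SIGTRAPs by the
0x80 bit in the stop signal. A minimal user-space sketch, assuming a libc that
exposes PTRACE_SETOPTIONS and PTRACE_O_TRACESYSGOOD (error handling elided):

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <signal.h>

    void trace_syscalls(pid_t pid)
    {
            int status;

            waitpid(pid, &status, 0);       /* child stopped after attach */
            ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);
            for (;;) {
                    ptrace(PTRACE_SYSCALL, pid, 0, 0);
                    waitpid(pid, &status, 0);
                    if (WIFEXITED(status))
                            break;
                    if (WSTOPSIG(status) == (SIGTRAP | 0x80)) {
                            /* syscall entry or exit stop */
                    } else {
                            /* a real signal arrived instead */
                    }
            }
    }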
oi .Lschib+5-.Lpg0(%r13),0x84
.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
msch .Lschib-.Lpg0(%r13)
- ssch .Liplorb-.Lpg0(%r13)
- jz .L001
+ lhi %r0,5
+.Lssch: ssch .Liplorb-.Lpg0(%r13)
+ jz .L001
+ brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/errno.h>
#include <asm/lowcore.h>
#include <asm/s390_ext.h>
* iucv and 0x2603 pfault) this is always the first element.
*/
ext_int_info_t *ext_int_hash[256] = { 0, };
-ext_int_info_t ext_int_info_timer;
-ext_int_info_t ext_int_info_hwc;
-ext_int_info_t ext_int_pfault;
int register_external_interrupt(__u16 code, ext_int_handler_t handler) {
ext_int_info_t *p;
int index;
- index = code & 0xff;
- p = ext_int_hash[index];
- while (p != NULL) {
- if (p->code == code)
- return -EBUSY;
- p = p->next;
- }
- if (code == 0x1004) /* time_init is done before kmalloc works :-/ */
- p = &ext_int_info_timer;
- else if (code == 0x2401) /* hwc_init is done too early too */
- p = &ext_int_info_hwc;
- else if (code == 0x2603) /* pfault_init is done too early too */
- p = &ext_int_pfault;
- else
- p = (ext_int_info_t *)
- kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
+ p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
if (p == NULL)
return -ENOMEM;
p->code = code;
p->handler = handler;
+ index = code & 0xff;
+ p->next = ext_int_hash[index];
+ ext_int_hash[index] = p;
+ return 0;
+}
+
+int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+ ext_int_info_t *p) {
+ int index;
+
+ if (p == NULL)
+ return -EINVAL;
+ p->code = code;
+ p->handler = handler;
+ index = code & 0xff;
p->next = ext_int_hash[index];
ext_int_hash[index] = p;
return 0;
q->next = p->next;
else
ext_int_hash[index] = p->next;
- if (code != 0x1004 && code != 0x2401 && code != 0x2603)
- kfree(p);
+ kfree(p);
return 0;
}
+int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+ ext_int_info_t *p) {
+ ext_int_info_t *q;
+ int index;
+
+ if (p == NULL || p->code != code || p->handler != handler)
+ return -EINVAL;
+ index = code & 0xff;
+ q = ext_int_hash[index];
+ if (p != q) {
+ while (q != NULL) {
+ if (q->next == p)
+ break;
+ q = q->next;
+ }
+ if (q == NULL)
+ return -ENOENT;
+ q->next = p->next;
+ } else
+ ext_int_hash[index] = p->next;
+ return 0;
+}
+
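register_early_external_interrupt() exists because time_init() and
pfault_init() run before kmalloc() works, so the caller supplies the hash node
itself. A usage sketch; the handler and node names are hypothetical (0x1004 is
the clock-comparator code this patch registers the same way later):

    static ext_int_info_t my_ext_int_node; /* caller-provided: no kmalloc yet */

    static void my_early_handler(struct pt_regs *regs, __u16 code)
    {
            /* ... service the external interrupt ... */
    }

    void __init my_early_init(void)
    {
            if (register_early_external_interrupt(0x1004, my_early_handler,
                                                  &my_ext_int_node) != 0)
                    panic("Couldn't request external interrupt 0x1004");
    }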
EXPORT_SYMBOL(register_external_interrupt);
EXPORT_SYMBOL(unregister_external_interrupt);
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/smp.h>
#include <asm/checksum.h>
#include <asm/delay.h>
#include <asm/setup.h>
+#include <asm/softirq.h>
#if CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
EXPORT_SYMBOL_NOVERS(_oi_bitmap);
EXPORT_SYMBOL_NOVERS(_ni_bitmap);
EXPORT_SYMBOL_NOVERS(_zb_findmap);
-EXPORT_SYMBOL_NOVERS(__copy_from_user_fixup);
-EXPORT_SYMBOL_NOVERS(__copy_to_user_fixup);
+EXPORT_SYMBOL_NOVERS(__copy_from_user_asm);
+EXPORT_SYMBOL_NOVERS(__copy_to_user_asm);
+EXPORT_SYMBOL_NOVERS(__clear_user_asm);
/*
* semaphore ops
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
/*
* string functions
EXPORT_SYMBOL_NOVERS(memcmp);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(memscan);
EXPORT_SYMBOL_NOVERS(strlen);
EXPORT_SYMBOL_NOVERS(strchr);
EXPORT_SYMBOL_NOVERS(strcmp);
EXPORT_SYMBOL_NOVERS(strnlen);
EXPORT_SYMBOL_NOVERS(strrchr);
EXPORT_SYMBOL_NOVERS(strstr);
-EXPORT_SYMBOL_NOVERS(strsep);
EXPORT_SYMBOL_NOVERS(strpbrk);
/*
EXPORT_SYMBOL(console_mode);
EXPORT_SYMBOL(console_device);
EXPORT_SYMBOL_NOVERS(do_call_softirq);
-
-
/*
- * linux/arch/S390/kernel/semaphore.c
+ * linux/arch/s390/kernel/semaphore.c
*
* S390 version
* Copyright (C) 1998-2000 IBM Corporation
*
*/
#include <linux/sched.h>
+#include <linux/errno.h>
#include <asm/semaphore.h>
/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
+ * Atomically update sem->count. Equivalent to:
+ * old_val = sem->count.counter;
+ * new_val = ((old_val >= 0) ? old_val : 0) + incr;
+ * sem->count.counter = new_val;
+ * return old_val;
*/
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+ int old_val, new_val;
+
+ __asm__ __volatile__(" l %0,0(%3)\n"
+ "0: ltr %1,%0\n"
+ " jhe 1f\n"
+ " lhi %1,0\n"
+ "1: ar %1,%4\n"
+ " cs %0,%1,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old_val), "=&d" (new_val),
+ "+m" (sem->count)
+ : "a" (&sem->count), "d" (incr) : "cc" );
+ return old_val;
+}
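A plain-C restatement of the inline assembly may help: it is a compare-and-swap
loop that clamps a negative count to zero before adding the increment.
atomic_cas() below is a hypothetical stand-in for the CS instruction:

    static int sem_update_count_sketch(struct semaphore *sem, int incr)
    {
            int old_val, new_val;

            do {
                    old_val = atomic_read(&sem->count);
                    new_val = ((old_val >= 0) ? old_val : 0) + incr;
            } while (!atomic_cas(&sem->count, old_val, new_val));
            return old_val;                 /* caller inspects the old value */
    }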
/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
+ * The inline function up() incremented count but the result
+ * was <= 0. This indicates that some process is waiting on
+ * the semaphore. The semaphore is free and we'll wake the
+ * first sleeping process, so we set count to 1 unless some
+ * other cpu has called up in the meantime in which case
+ * we just increment count by 1.
*/
-
void __up(struct semaphore *sem)
{
+ __sem_update_count(sem, 1);
wake_up(&sem->wait);
}
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
+/*
+ * The inline function down() decremented count and the result
+ * was < 0. The wait loop will atomically test and update the
+ * semaphore counter following the rules:
+ * count > 0: decrement count, wake up queue and exit.
+ * count <= 0: set count to -1, go to sleep.
+ */
void __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
+
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
+ while (__sem_update_count(sem, -1) <= 0) {
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
}
- spin_unlock_irq(&semaphore_lock);
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
+/*
+ * Same as __down() with an additional test for signals.
+ * If a signal is pending the count is updated as follows:
+ * count > 0: wake up queue and exit.
+ * count <= 0: set count to 0, wake up queue and exit.
+ */
int __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
+
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers ++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we* can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
+ while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
+ __sem_update_count(sem, 0);
retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock. The
- * "-1" is because we're still hoping to get
- * the lock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
break;
}
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
schedule();
tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
}
- spin_unlock_irq(&semaphore_lock);
- tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
return retval;
}
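For context, these are only the contended slow paths; the inline wrappers in
<asm/semaphore.h> call into them. A typical caller, as a sketch with a
hypothetical driver semaphore:

    static DECLARE_MUTEX(io_sem);           /* semaphore with count 1 */

    static int do_io_sketch(void)
    {
            if (down_interruptible(&io_sem))
                    return -EINTR;          /* a signal aborted the wait */
            /* ... critical section ... */
            up(&io_sem);
            return 0;
    }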
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- */
-int __down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- int sleepers;
-
- spin_lock_irqsave(&semaphore_lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers, &sem->count))
- wake_up(&sem->wait);
-
- spin_unlock_irqrestore(&semaphore_lock, flags);
- return 1;
-}
unsigned int console_device = -1;
unsigned long memory_size = 0;
unsigned long machine_flags = 0;
-struct { unsigned long addr, size, type; } memory_chunk[16];
+struct { unsigned long addr, size, type; } memory_chunk[16] = { { 0 } };
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
__u16 boot_cpu_addr;
/*
* Force FPU initialization:
*/
- current->flags &= ~PF_USEDFPU;
+ clear_thread_flag(TIF_USEDFPU);
current->used_math = 0;
/* Setup active_mm for idle_task */
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_HWC_CONSOLE)
- if (strncmp(str, "hwc", 4) == 0 && !MACHINE_IS_P390)
+ if (strncmp(str, "hwc", 4) == 0)
SET_CONSOLE_HWC;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
- if (strncmp(str, "3215", 5) == 0 && (MACHINE_IS_VM || MACHINE_IS_P390))
+ if (strncmp(str, "3215", 5) == 0)
SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
- if (strncmp(str, "3270", 5) == 0 && (MACHINE_IS_VM || MACHINE_IS_P390))
+ if (strncmp(str, "3270", 5) == 0)
SET_CONSOLE_3270;
#endif
return 1;
}
}
+#ifdef CONFIG_SMP
+extern void machine_restart_smp(char *);
+extern void machine_halt_smp(void);
+extern void machine_power_off_smp(void);
+
+void (*_machine_restart)(char *command) = machine_restart_smp;
+void (*_machine_halt)(void) = machine_halt_smp;
+void (*_machine_power_off)(void) = machine_power_off_smp;
+#else
/*
* Reboot, halt and power_off routines for non SMP.
*/
-
-#ifndef CONFIG_SMP
-void machine_restart(char * __unused)
+static void do_machine_restart_nonsmp(char * __unused)
{
reipl(S390_lowcore.ipl_device);
}
-void machine_halt(void)
+static void do_machine_halt_nonsmp(void)
{
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
-void machine_power_off(void)
+static void do_machine_power_off_nonsmp(void)
{
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
+
+void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
+void (*_machine_halt)(void) = do_machine_halt_nonsmp;
+void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif
+ /*
+ * Reboot, halt and power_off stubs. They just call _machine_restart,
+ * _machine_halt or _machine_power_off.
+ */
+
+void machine_restart(char *command)
+{
+ _machine_restart(command);
+}
+
+void machine_halt(void)
+{
+ _machine_halt();
+}
+
+void machine_power_off(void)
+{
+ _machine_power_off();
+}
+
/*
* Setup function called from init/main.c just after the banner
* was printed.
* print what head.S has found out about the machine
*/
printk((MACHINE_IS_VM) ?
- "We are running under VM\n" :
- "We are running native\n");
+ "We are running under VM (31 bit mode)\n" :
+ "We are running native (31 bit mode)\n");
printk((MACHINE_HAS_IEEE) ?
"This machine has an IEEE fpu\n" :
"This machine has no IEEE fpu\n");
lowcore->io_new_psw.mask = _IO_PSW_MASK;
lowcore->io_new_psw.addr = _ADDR_31 + (addr_t) &io_int_handler;
lowcore->ipl_device = S390_lowcore.ipl_device;
- lowcore->kernel_stack = ((__u32) &init_task_union) + 8192;
+ lowcore->kernel_stack = ((__u32) &init_thread_union) + 8192;
lowcore->async_stack = (__u32)
__alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0) + 8192;
+ lowcore->jiffy_timer = -1LL;
set_prefix((__u32) lowcore);
cpu_init();
boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct cpuinfo_S390 *cpuinfo;
- unsigned n = v;
+ unsigned long n = (unsigned long) v - 1;
- if (!n--) {
+ if (!n) {
seq_printf(m, "vendor_id : IBM/S390\n"
"# processors : %i\n"
"bogomips per cpu: %lu.%02lu\n",
smp_num_cpus, loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ))%100);
- } else if (cpu_online_map & (1 << n)) {
- cpuinfo = &safe_get_cpu_lowcore(n).cpu_data;
- seq_printf(m, "processor %i: "
+ }
+ if (cpu_online_map & (1 << n)) {
+ cpuinfo = &safe_get_cpu_lowcore(n)->cpu_data;
+ seq_printf(m, "processor %li: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos <= NR_CPUS ? (void)(*pos+1) : NULL;
+ return *pos <= NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
+#include <linux/tty.h>
#include <linux/personality.h>
+#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
static inline int map_signal(int sig)
{
- if (current->exec_domain
- && current->exec_domain->signal_invmap
+ if (current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
&& sig < 32)
- return current->exec_domain->signal_invmap[sig];
+ return current_thread_info()->exec_domain->signal_invmap[sig];
else
return sig;
}
goto give_sigsegv;
}
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (addr_t *) frame))
+ goto give_sigsegv;
+
/* Set up registers for signal handler */
regs->gprs[15] = (addr_t)frame;
regs->psw.addr = FIX_PSW(ka->sa.sa_handler);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->gprs[15]),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
(u16 *)(frame->retcode));
}
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (addr_t *) frame))
+ goto give_sigsegv;
+
/* Set up registers for signal handler */
regs->gprs[15] = (addr_t)frame;
regs->psw.addr = FIX_PSW(ka->sa.sa_handler);
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
+#include <asm/tlbflush.h>
/* prototypes */
extern int cpu_idle(void * unused);
static int max_cpus = NR_CPUS; /* Setup configured maximum number of CPUs to activate */
int smp_num_cpus;
struct _lowcore *lowcore_ptr[NR_CPUS];
-unsigned int prof_multiplier[NR_CPUS];
-unsigned int prof_old_multiplier[NR_CPUS];
-unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time=0;
int smp_threads_ready=0; /* Set when the idlers are all forked. */
static atomic_t smp_commenced = ATOMIC_INIT(0);
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-unsigned long cpu_online_map;
+volatile unsigned long phys_cpu_present_map;
+volatile unsigned long cpu_online_map;
+unsigned long cache_decay_ticks = 0;
/*
* Setup routine for controlling SMP activation
return 0;
}
+static inline void do_send_stop(void)
+{
+ u32 dummy;
+ int i, rc;
+
+ /* stop all processors */
+ for (i = 0; i < smp_num_cpus; i++) {
+ if (smp_processor_id() == i)
+ continue;
+ do {
+ rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+ } while (rc == sigp_busy);
+ }
+}
+
+static inline void do_store_status(void)
+{
+ unsigned long low_core_addr;
+ u32 dummy;
+ int i, rc;
+
+ /* store status of all processors in their lowcores (real 0) */
+ for (i = 0; i < smp_num_cpus; i++) {
+ if (smp_processor_id() == i)
+ continue;
+ low_core_addr = (unsigned long)get_cpu_lowcore(i);
+ do {
+ rc = signal_processor_ps(&dummy, low_core_addr, i,
+ sigp_store_status_at_address);
+ } while(rc == sigp_busy);
+ }
+}
/*
- * Various special callbacks
+ * This function sends a 'stop' sigp to all other CPUs in the system.
+ * It goes straight through.
*/
+void smp_send_stop(void)
+{
+ /* write magic number to zero page (absolute 0) */
+ get_cpu_lowcore(smp_processor_id())->panic_magic = __PANIC_MAGIC;
+
+ /* stop other processors. */
+ do_send_stop();
+
+ /* store status of other processors. */
+ do_store_status();
+}
+
+/*
+ * Reboot, halt and power_off routines for SMP.
+ */
+static volatile unsigned long cpu_restart_map;
+
-void do_machine_restart(void)
+static void do_machine_restart(void * __unused)
{
- smp_send_stop();
- reipl(S390_lowcore.ipl_device);
+ clear_bit(smp_processor_id(), &cpu_restart_map);
+ if (smp_processor_id() == 0) {
+ /* Wait for all other cpus to enter do_machine_restart. */
+ while (cpu_restart_map != 0);
+ /* Store status of other cpus. */
+ do_store_status();
+ /*
+ * Finally call reipl. Because we waited for all other
+ * cpus to enter this function we know that they do
+ * not hold any s390irq-locks (the cpus have been
+ * interrupted by an external interrupt and s390irq
+ * locks are always held disabled).
+ */
+ reipl(S390_lowcore.ipl_device);
+ }
+ signal_processor(smp_processor_id(), sigp_stop);
}
-void machine_restart(char * __unused)
+void machine_restart_smp(char * __unused)
{
- if (smp_processor_id() != 0) {
- smp_ext_bitcall(0, ec_restart);
- for (;;);
- } else
- do_machine_restart();
+ cpu_restart_map = cpu_online_map;
+ smp_call_function(do_machine_restart, NULL, 0, 0);
+ do_machine_restart(NULL);
}
-void do_machine_halt(void)
+static void do_machine_halt(void * __unused)
{
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- cpcmd(vmhalt_cmd, NULL, 0);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+ if (smp_processor_id() == 0) {
+ smp_send_stop();
+ if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+ cpcmd(vmhalt_cmd, NULL, 0);
+ signal_processor(smp_processor_id(),
+ sigp_stop_and_store_status);
+ }
+ for (;;)
+ enabled_wait();
}
-void machine_halt(void)
+void machine_halt_smp(void)
{
- if (smp_processor_id() != 0) {
- smp_ext_bitcall(0, ec_halt);
- for (;;);
- } else
- do_machine_halt();
+ smp_call_function(do_machine_halt, NULL, 0, 0);
+ do_machine_halt(NULL);
}
-void do_machine_power_off(void)
+static void do_machine_power_off(void * __unused)
{
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- cpcmd(vmpoff_cmd, NULL, 0);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+ if (smp_processor_id() == 0) {
+ smp_send_stop();
+ if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+ cpcmd(vmpoff_cmd, NULL, 0);
+ signal_processor(smp_processor_id(),
+ sigp_stop_and_store_status);
+ }
+ for (;;)
+ enabled_wait();
}
-void machine_power_off(void)
+void machine_power_off_smp(void)
{
- if (smp_processor_id() != 0) {
- smp_ext_bitcall(0, ec_power_off);
- for (;;);
- } else
- do_machine_power_off();
+ smp_call_function(do_machine_power_off, NULL, 0, 0);
+ do_machine_power_off(NULL);
}
/*
*
* For the ec_schedule signal we have to do nothing. All the work
* is done automatically when we return from the interrupt.
- * For the ec_restart, ec_halt and ec_power_off we call the
- * appropriate routine.
*/
do {
bits = atomic_read(&S390_lowcore.ext_call_fast);
} while (atomic_compare_and_swap(bits,0,&S390_lowcore.ext_call_fast));
- if (test_bit(ec_restart, &bits))
- do_machine_restart();
- if (test_bit(ec_halt, &bits))
- do_machine_halt();
- if (test_bit(ec_power_off, &bits))
- do_machine_power_off();
if (test_bit(ec_call_function, &bits))
do_call_function();
}
*/
static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
- struct _lowcore *lowcore = &get_cpu_lowcore(cpu);
+ struct _lowcore *lowcore = get_cpu_lowcore(cpu);
sigp_ccode ccode;
/*
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
struct _lowcore *lowcore;
- sigp_ccode ccode;
int i;
for (i = 0; i < smp_num_cpus; i++) {
if (smp_processor_id() == i)
continue;
- lowcore = &get_cpu_lowcore(i);
+ lowcore = get_cpu_lowcore(i);
/*
* Set signaling bit in lowcore of target cpu and kick it
*/
atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
- ccode = signal_processor(i, sigp_external_call);
- }
-}
-
-/*
- * this function sends a 'stop' sigp to all other CPUs in the system.
- * it goes straight through.
- */
-
-void smp_send_stop(void)
-{
- int i;
- u32 dummy;
- unsigned long low_core_addr;
-
- /* write magic number to zero page (absolute 0) */
-
- get_cpu_lowcore(smp_processor_id()).panic_magic = __PANIC_MAGIC;
-
- /* stop all processors */
-
- for (i = 0; i < smp_num_cpus; i++) {
- if (smp_processor_id() != i) {
- int ccode;
- do {
- ccode = signal_processor_ps(
- &dummy,
- 0,
- i,
- sigp_stop);
- } while(ccode == sigp_busy);
- }
- }
-
- /* store status of all processors in their lowcores (real 0) */
-
- for (i = 0; i < smp_num_cpus; i++) {
- if (smp_processor_id() != i) {
- int ccode;
- low_core_addr = (unsigned long)&get_cpu_lowcore(i);
- do {
- ccode = signal_processor_ps(
- &dummy,
- low_core_addr,
- i,
- sigp_store_status_at_address);
- } while(ccode == sigp_busy);
- }
+ while (signal_processor(i, sigp_external_call) == sigp_busy)
+ udelay(10);
}
}
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-
void smp_send_reschedule(int cpu)
{
smp_ext_bitcall(cpu, ec_schedule);
{
int curr_cpu;
- current->processor = 0;
+ current_thread_info()->cpu = 0;
smp_num_cpus = 1;
+ phys_cpu_present_map = 1;
cpu_online_map = 1;
for (curr_cpu = 0;
curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) {
if (signal_processor(smp_num_cpus, sigp_sense) ==
sigp_not_operational)
continue;
+ set_bit(smp_num_cpus, &phys_cpu_present_map);
smp_num_cpus++;
}
printk("Detected %d CPU's\n",(int) smp_num_cpus);
/*
* Activate a secondary processor.
*/
-extern void init_100hz_timer(void);
+extern void init_cpu_timer(void);
extern int pfault_init(void);
extern int pfault_token(void);
{
/* Setup the cpu */
cpu_init();
+ /* Mark this cpu as online */
+ set_bit(smp_processor_id(), &cpu_online_map);
/* Print info about this processor */
- print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id()).cpu_data);
+ print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id())->cpu_data);
/* Wait for completion of smp startup */
while (!atomic_read(&smp_commenced))
/* nothing */ ;
- /* init per CPU 100 hz timer */
- init_100hz_timer();
+ /* init per CPU timer */
+ init_cpu_timer();
#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
return cpu_idle(NULL);
}
-/*
- * The restart interrupt handler jumps to start_secondary directly
- * without the detour over initialize_secondary. We defined it here
- * so that the linker doesn't complain.
- */
-void __init initialize_secondary(void)
-{
-}
-
static struct task_struct *__init fork_by_hand(void)
{
struct pt_regs regs;
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle->processor = cpu;
- idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
+ init_idle(idle, cpu);
- del_from_runqueue(idle);
unhash_process(idle);
- init_tasks[cpu] = idle;
- cpu_lowcore=&get_cpu_lowcore(cpu);
+ cpu_lowcore = get_cpu_lowcore(cpu);
cpu_lowcore->save_area[15] = idle->thread.ksp;
- cpu_lowcore->kernel_stack = (idle->thread.ksp | 8191) + 1;
+ cpu_lowcore->kernel_stack = (__u32) idle->thread_info + 8192;
__asm__ __volatile__("la 1,%0\n\t"
"stctl 0,15,0(1)\n\t"
"la 1,%1\n\t"
eieio();
signal_processor(cpu,sigp_restart);
- /* Mark this cpu as online */
- set_bit(cpu, &cpu_online_map);
}
/*
/*
* Initialize the logical to physical CPU number mapping
- * and the per-CPU profiling counter/multiplier
*/
-
- for (i = 0; i < NR_CPUS; i++) {
- prof_counter[i] = 1;
- prof_old_multiplier[i] = 1;
- prof_multiplier[i] = 1;
- }
-
- print_cpu_info(&safe_get_cpu_lowcore(0).cpu_data);
+ print_cpu_info(&safe_get_cpu_lowcore(0)->cpu_data);
- for(i = 0; i < smp_num_cpus; i++)
- {
+ for(i = 0; i < smp_num_cpus; i++) {
lowcore_ptr[i] = (struct _lowcore *)
__get_free_page(GFP_KERNEL|GFP_DMA);
- if (lowcore_ptr[i] == NULL)
- panic("smp_boot_cpus failed to "
- "allocate prefix memory\n");
async_stack = __get_free_pages(GFP_KERNEL,1);
- if (async_stack == 0)
- panic("smp_boot_cpus failed to allocate "
- "asyncronous interrupt stack\n");
+ if (lowcore_ptr[i] == NULL || async_stack == 0UL)
+ panic("smp_boot_cpus failed to allocate memory\n");
memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
lowcore_ptr[i]->async_stack = async_stack + (2 * PAGE_SIZE);
* Most of the parameters are set up when the cpu is
* started up.
*/
- if (smp_processor_id() == i)
+ if (smp_processor_id() == i) {
set_prefix((u32) lowcore_ptr[i]);
- else {
- ccode = signal_processor_p((u32)(lowcore_ptr[i]),
- i, sigp_set_prefix);
- if (ccode)
- /* if this gets troublesome I'll have to do
- * something about it. */
- printk("ccode %d for cpu %d returned when "
- "setting prefix in smp_boot_cpus not good.\n",
- (int) ccode, (int) i);
- else
- do_boot_cpu(i);
+ continue;
}
+ ccode = signal_processor_p((u32)(lowcore_ptr[i]),
+ i, sigp_set_prefix);
+ if (ccode)
+ panic("sigp_set_prefix failed for cpu %d "
+ "with condition code %d\n",
+ (int) i, (int) ccode);
+ do_boot_cpu(i);
}
+ /*
+ * Now wait until all of the cpus are online.
+ */
+ while (phys_cpu_present_map != cpu_online_map);
}
/*
return 0;
}
-/*
- * Local timer interrupt handler. It does both profiling and
- * process statistics/rescheduling.
- *
- * We do profiling in every local tick, statistics/rescheduling
- * happen only every 'profiling multiplier' ticks. The default
- * multiplier is 1 and it can be changed by writing the new multiplier
- * value into /proc/profile.
- */
-
-void smp_local_timer_interrupt(struct pt_regs * regs)
-{
- int user = (user_mode(regs) != 0);
- int cpu = smp_processor_id();
-
- /*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
- if (!user_mode(regs))
- s390_do_profile(regs->psw.addr);
-
- if (!--prof_counter[cpu]) {
-
- /*
- * The multiplier may have changed since the last time we got
- * to this point as a result of the user writing to
- * /proc/profile. In this case we need to adjust the APIC
- * timer accordingly.
- *
- * Interrupts are already masked off at this point.
- */
- prof_counter[cpu] = prof_multiplier[cpu];
- if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
- /* FIXME setup_APIC_timer(calibration_result/prof_counter[cpu]
- ); */
- prof_old_multiplier[cpu] = prof_counter[cpu];
- }
-
- /*
- * After doing the above, we need to make like
- * a normal interrupt - otherwise timer interrupts
- * ignore the global interrupt lock, which is the
- * WrongThing (tm) to do.
- */
-
- update_process_times(user);
- }
-}
-
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(smp_call_function);
#include <asm/irq.h>
-
/* change this if you have some constant time drift */
-#define USECS_PER_JIFFY ((signed long)1000000/HZ)
-#define CLK_TICKS_PER_JIFFY ((signed long)USECS_PER_JIFFY<<12)
+#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
+#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
#define TICK_SIZE tick
-static uint64_t init_timer_cc, last_timer_cc;
+static ext_int_info_t ext_int_info_timer;
+static uint64_t init_timer_cc;
extern rwlock_t xtime_lock;
extern unsigned long wall_jiffies;
-void tod_to_timeval(uint64_t todval, struct timeval *xtime)
+void tod_to_timeval(__u64 todval, struct timeval *xtime)
{
const int high_bit = 0x80000000L;
const int c_f4240 = 0xf4240L;
: "cc", "memory", "2", "3", "4" );
}
-unsigned long do_gettimeoffset(void)
+static inline unsigned long do_gettimeoffset(void)
{
- __u64 timer_cc;
+ __u64 now;
- asm volatile ("STCK %0" : "=m" (timer_cc));
- /* We require the offset from the previous interrupt */
- return ((unsigned long)((timer_cc - last_timer_cc)>>12));
+ asm ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
+ now = (now - init_timer_cc) >> 12;
+ /* We require the offset from the latest update of xtime */
+ now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+ return (unsigned long) now;
}
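The shift by 12 works because bit 51 of the TOD clock increments once per
microsecond, so a raw TOD delta divided by 2^12 is a microsecond count. A
worked check with HZ = 100:

    /* Not kernel code, just the arithmetic spelled out:
     *   USECS_PER_JIFFY     = 1000000 / 100   = 10000 microseconds
     *   CLK_TICKS_PER_JIFFY = 10000 << 12     = 40960000 TOD units
     */
    unsigned long tod_delta_to_usecs(__u64 tod_delta)  /* hypothetical helper */
    {
            return (unsigned long) (tod_delta >> 12);
    }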
/*
{
unsigned long flags;
unsigned long usec, sec;
- unsigned long lost_ticks;
read_lock_irqsave(&xtime_lock, flags);
- lost_ticks = jiffies - wall_jiffies;
- usec = do_gettimeoffset();
- if (lost_ticks)
- usec +=(USECS_PER_JIFFY*lost_ticks);
sec = xtime.tv_sec;
- usec += xtime.tv_usec;
+ usec = xtime.tv_usec + do_gettimeoffset();
read_unlock_irqrestore(&xtime_lock, flags);
while (usec >= 1000000) {
extern __u16 boot_cpu_addr;
#endif
-void do_timer_interrupt(struct pt_regs *regs, __u16 error_code)
+static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
{
int cpu = smp_processor_id();
irq_enter(cpu, 0);
- /*
- * reset timer to 10ms minus time already elapsed
- * since timer-interrupt pending
- */
+ /*
+ * set clock comparator for next tick
+ */
+ S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
+ asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
+
#ifdef CONFIG_SMP
- if(S390_lowcore.cpu_data.cpu_addr==boot_cpu_addr) {
+ if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr)
write_lock(&xtime_lock);
- last_timer_cc = S390_lowcore.jiffy_timer_cc;
- }
-#else
- last_timer_cc = S390_lowcore.jiffy_timer_cc;
-#endif
- /* set clock comparator */
- S390_lowcore.jiffy_timer_cc += CLK_TICKS_PER_JIFFY;
- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer_cc));
-/*
- * In the SMP case we use the local timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifdef CONFIG_SMP
- /* when SMP, do smp_local_timer_interrupt for *all* CPUs,
- but only do the rest for the boot CPU */
- smp_local_timer_interrupt(regs);
-#else
- if (!user_mode(regs))
- s390_do_profile(regs->psw.addr);
-#endif
+ update_process_times(user_mode(regs));
-#ifdef CONFIG_SMP
- if(S390_lowcore.cpu_data.cpu_addr==boot_cpu_addr)
-#endif
- {
+ if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr) {
do_timer(regs);
-#ifdef CONFIG_SMP
write_unlock(&xtime_lock);
-#endif
}
+#else
+ do_timer(regs);
+#endif
irq_exit(cpu, 0);
}
/*
* Start the clock comparator on the current CPU
*/
-static long cr0 __attribute__ ((aligned (8)));
-
-void init_100hz_timer(void)
+void init_cpu_timer(void)
{
+ unsigned long cr0;
+
/* allow clock comparator timer interrupt */
asm volatile ("STCTL 0,0,%0" : "=m" (cr0) : : "memory");
cr0 |= 0x800;
asm volatile ("LCTL 0,0,%0" : : "m" (cr0) : "memory");
- /* set clock comparator */
- /* read the TOD clock */
- asm volatile ("STCK %0" : "=m" (S390_lowcore.jiffy_timer_cc));
- S390_lowcore.jiffy_timer_cc += CLK_TICKS_PER_JIFFY;
- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer_cc));
+ S390_lowcore.jiffy_timer = (__u64) jiffies * CLK_TICKS_PER_JIFFY;
+ S390_lowcore.jiffy_timer += init_timer_cc + CLK_TICKS_PER_JIFFY;
+ asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
}
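The comparator arithmetic deserves a note: instead of the old scheme of
rearming the timer relative to "now", the next tick is computed as an absolute
TOD value, so ticks stay phase-locked to the TOD clock regardless of interrupt
latency:

    /* Sketch of the invariant kept by init_cpu_timer() and
     * do_comparator_interrupt(): tick n fires when the TOD clock reaches
     *     init_timer_cc + (n + 1) * CLK_TICKS_PER_JIFFY
     * and each interrupt advances the comparator by exactly one
     * CLK_TICKS_PER_JIFFY, so no drift accumulates. */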
/*
*/
void __init time_init(void)
{
+ __u64 set_time_cc;
int cc;
/* kick the TOD clock */
- asm volatile ("STCK %1\n\t"
+ asm volatile ("STCK 0(%1)\n\t"
"IPM %0\n\t"
- "SRL %0,28" : "=r" (cc), "=m" (init_timer_cc));
+ "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
+ : "memory", "cc");
switch (cc) {
case 0: /* clock in set state: all is fine */
break;
printk("time_init: TOD clock stopped/non-operational\n");
break;
}
+
+ /* set xtime */
+ set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
+ (0x3c26700LL*1000000*4096);
+ tod_to_timeval(set_time_cc, &xtime);
+
/* request the 0x1004 external interrupt */
- if (register_external_interrupt(0x1004, do_timer_interrupt) != 0)
- panic("Couldn't request external interrupts 0x1004");
- init_100hz_timer();
- init_timer_cc = S390_lowcore.jiffy_timer_cc;
- init_timer_cc -= 0x8126d60e46000000LL -
- (0x3c26700LL*1000000*4096);
- tod_to_timeval(init_timer_cc, &xtime);
+ if (register_early_external_interrupt(0x1004, do_comparator_interrupt,
+ &ext_int_info_timer) != 0)
+ panic("Couldn't request external interrupt 0x1004");
+
+ /* init CPU timer */
+ init_cpu_timer();
}
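The magic numbers in time_init() decode as follows; a sketch of the epoch
arithmetic, assuming the architected 1900-01-01 TOD epoch (0x8126d60e46000000
works out to 1972-01-01, and 0x3c26700 seconds is exactly two 365-day years,
63072000 s):

    __u64 unix_epoch_tod(__u64 now_tod)     /* hypothetical helper */
    {
            /* subtract TOD(1972-01-01), add back two years in TOD
             * units (seconds * 1000000 * 4096) -> units since 1970 */
            return now_tod - 0x8126d60e46000000ULL
                           + 0x3c26700ULL * 1000000 * 4096;
    }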
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
-#if CONFIG_REMOTE_DEBUG
-#include <asm/gdb-stub.h>
-#endif
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#endif
#endif
-extern pgm_check_handler_t do_page_fault;
+extern pgm_check_handler_t do_protection_exception;
+extern pgm_check_handler_t do_segment_exception;
+extern pgm_check_handler_t do_page_exception;
extern pgm_check_handler_t do_pseudo_page_fault;
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
+static ext_int_info_t ext_int_pfault;
#endif
+int kstack_depth_to_print = 12;
+
+/*
+ * If the address is either in the .text section of the
+ * kernel, or in the vmalloc'ed module regions, it *may*
+ * be the address of a calling routine
+ */
+extern char _stext, _etext;
+
+#ifdef CONFIG_MODULES
+
+extern struct module *module_list;
+extern struct module kernel_module;
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ int retval = 0;
+ struct module *mod;
+
+ if (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext)
+ return 1;
+
+ for (mod = module_list; mod != &kernel_module; mod = mod->next) {
+ /* mod_bound tests for addr being inside the vmalloc'ed
+ * module area. Of course it'd be better to test only
+ * for the .text subset... */
+ if (mod_bound(addr, 0, mod)) {
+ retval = 1;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+#else
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ return (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext);
+}
+
+#endif
+
+void show_trace(unsigned long * stack)
+{
+ unsigned long backchain, low_addr, high_addr, ret_addr;
+ int i;
+
+ if (!stack)
+ stack = (unsigned long*)&stack;
+
+ printk("Call Trace: ");
+ low_addr = ((unsigned long) stack) & PSW_ADDR_MASK;
+ high_addr = (low_addr & (-THREAD_SIZE)) + THREAD_SIZE;
+ /* Skip the first frame (biased stack) */
+ backchain = *((unsigned long *) low_addr) & PSW_ADDR_MASK;
+ /* Print up to 8 lines */
+ for (i = 0; i < 8; i++) {
+ if (backchain < low_addr || backchain >= high_addr)
+ break;
+ ret_addr = *((unsigned long *) (backchain+56)) & PSW_ADDR_MASK;
+ if (!kernel_text_address(ret_addr))
+ break;
+ if (i && ((i % 6) == 0))
+ printk("\n ");
+ printk("[<%08lx>] ", ret_addr);
+ low_addr = backchain;
+ backchain = *((unsigned long *) backchain) & PSW_ADDR_MASK;
+ }
+ printk("\n");
+}
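show_trace() leans on the 31-bit s390 ABI frame layout: the backchain (the
caller's frame address) sits at offset 0 and the saved return register r14 at
offset 56, which is where the backchain+56 above comes from. As a sketch:

    struct stack_frame_sketch {             /* 31-bit ABI layout, assumed */
            unsigned long back_chain;       /* offset 0: caller's frame or 0 */
            unsigned long empty[5];         /* offsets 4-23: eos, glue */
            unsigned long gprs[10];         /* offsets 24-63: saved r6-r15;
                                               gprs[8] at offset 56 is r14 */
    };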
+
+void show_trace_task(struct task_struct *tsk)
+{
+ /*
+ * We can't print the backtrace of a running process. It is
+ * unreliable at best and can cause kernel oopses.
+ */
+ if (tsk->state == TASK_RUNNING)
+ return;
+ show_trace((unsigned long *) tsk->thread.ksp);
+}
+
+void show_stack(unsigned long *sp)
+{
+ unsigned long *stack;
+ int i;
+
+ // debugging aid: "show_stack(NULL);" prints the
+ // back trace for this cpu.
+
+ if(sp == NULL)
+ sp = (unsigned long*) &sp;
+
+ stack = sp;
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+ printk("\n");
+ show_trace(sp);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ mm_segment_t old_fs;
+ char *mode;
+ int i;
+
+ mode = (regs->psw.mask & PSW_PROBLEM_STATE) ? "User" : "Krnl";
+ printk("%s PSW : %08lx %08lx\n",
+ mode, (unsigned long) regs->psw.mask,
+ (unsigned long) regs->psw.addr);
+ printk("%s GPRS: %08x %08x %08x %08x\n", mode,
+ regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+ printk("%s ACRS: %08x %08x %08x %08x\n", mode,
+ regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
+
+ /*
+ * Print the first 20 byte of the instruction stream at the
+ * time of the fault.
+ */
+ old_fs = get_fs();
+ if (regs->psw.mask & PSW_PROBLEM_STATE)
+ set_fs(USER_DS);
+ else
+ set_fs(KERNEL_DS);
+ printk("%s Code: ", mode);
+ for (i = 0; i < 20; i++) {
+ unsigned char c;
+ if (__get_user(c, (char *)(regs->psw.addr + i))) {
+ printk(" Bad PSW.");
+ break;
+ }
+ printk("%02x ", c);
+ }
+ set_fs(old_fs);
+
+ printk("\n");
+}
+
+/* This is called from fs/proc/array.c */
+char *task_show_regs(struct task_struct *task, char *buffer)
+{
+ struct pt_regs *regs;
+
+ regs = __KSTK_PTREGS(task);
+ buffer += sprintf(buffer, "task: %08lx, ksp: %08x\n",
+ (unsigned long) task, task->thread.ksp);
+ buffer += sprintf(buffer, "User PSW : %08lx %08lx\n",
+ (unsigned long) regs->psw.mask,
+ (unsigned long) regs->psw.addr);
+ buffer += sprintf(buffer, "User GPRS: %08x %08x %08x %08x\n",
+ regs->gprs[0], regs->gprs[1],
+ regs->gprs[2], regs->gprs[3]);
+ buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
+ regs->gprs[4], regs->gprs[5],
+ regs->gprs[6], regs->gprs[7]);
+ buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
+ regs->gprs[8], regs->gprs[9],
+ regs->gprs[10], regs->gprs[11]);
+ buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
+ regs->gprs[12], regs->gprs[13],
+ regs->gprs[14], regs->gprs[15]);
+ buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
+ regs->acrs[0], regs->acrs[1],
+ regs->acrs[2], regs->acrs[3]);
+ buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
+ regs->acrs[4], regs->acrs[5],
+ regs->acrs[6], regs->acrs[7]);
+ buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
+ regs->acrs[8], regs->acrs[9],
+ regs->acrs[10], regs->acrs[11]);
+ buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
+ regs->acrs[12], regs->acrs[13],
+ regs->acrs[14], regs->acrs[15]);
+ return buffer;
+}
+
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
-#define DO_ERROR(signr, str, name) \
-asmlinkage void name(struct pt_regs * regs, long interruption_code) \
-{ \
- do_trap(interruption_code, signr, str, regs, NULL); \
-}
-
-#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
-asmlinkage void name(struct pt_regs * regs, long interruption_code) \
-{ \
- siginfo_t info; \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = (void *)siaddr; \
- do_trap(interruption_code, signr, str, regs, &info); \
-}
-
static void inline do_trap(long interruption_code, int signr, char *str,
struct pt_regs *regs, siginfo_t *info)
{
if (regs->psw.mask & PSW_PROBLEM_STATE) {
struct task_struct *tsk = current;
- tsk->thread.trap_no = interruption_code;
+ tsk->thread.trap_no = interruption_code & 0xffff;
if (info)
force_sig_info(signr, info, tsk);
else
}
}
+static inline void *get_check_address(struct pt_regs *regs)
+{
+ return (void *) ADDR_BITS_REMOVE(regs->psw.addr-S390_lowcore.pgm_ilc);
+}
+
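get_check_address() relies on the program-check old PSW pointing past the
failing instruction, with the instruction-length code in the lowcore giving
its size. Worked through:

    /* Illustrative numbers only: a 4-byte instruction at 0x400080
     * takes a program check. The old PSW address is then 0x400084
     * and S390_lowcore.pgm_ilc is 4, so get_check_address()
     * returns 0x400084 - 4 = 0x400080. */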
int do_debugger_trap(struct pt_regs *regs,int signal)
{
if(regs->psw.mask&PSW_PROBLEM_STATE)
#if CONFIG_REMOTE_DEBUG
if(gdb_stub_initialised)
{
- gdb_stub_handle_exception((struct gdb_pt_regs *)regs,signal);
+ gdb_stub_handle_exception(regs, signal);
return 0;
}
#endif
return 0;
}
+#define DO_ERROR(signr, str, name) \
+asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+{ \
+ do_trap(interruption_code, signr, str, regs, NULL); \
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
+asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void *)siaddr; \
+ do_trap(interruption_code, signr, str, regs, &info); \
+}
+
DO_ERROR(SIGSEGV, "Unknown program exception", default_trap_handler)
-DO_ERROR(SIGILL, "privileged operation", privileged_op)
-DO_ERROR(SIGILL, "execute exception", execute_exception)
-DO_ERROR(SIGSEGV, "addressing exception", addressing_exception)
-DO_ERROR(SIGFPE, "fixpoint divide exception", divide_exception)
-DO_ERROR(SIGILL, "translation exception", translation_exception)
-DO_ERROR(SIGILL, "special operand exception", special_op_exception)
-DO_ERROR(SIGILL, "operand exception", operand_exception)
+
+DO_ERROR_INFO(SIGBUS, "addressing exception", addressing_exception,
+ BUS_ADRERR, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
+ FPE_INTDIV, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
+ ILL_PRVOPC, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
+ ILL_ILLOPN, get_check_address(regs))
+
+static inline void
+do_fp_trap(struct pt_regs *regs, void *location,
+ int fpc, long interruption_code)
+{
+ siginfo_t si;
+
+ si.si_signo = SIGFPE;
+ si.si_errno = 0;
+ si.si_addr = location;
+ si.si_code = 0;
+ /* FPC[2] is Data Exception Code */
+ if ((fpc & 0x00000300) == 0) {
+ /* bits 6 and 7 of DXC are 0 iff IEEE exception */
+ if (fpc & 0x8000) /* invalid fp operation */
+ si.si_code = FPE_FLTINV;
+ else if (fpc & 0x4000) /* div by 0 */
+ si.si_code = FPE_FLTDIV;
+ else if (fpc & 0x2000) /* overflow */
+ si.si_code = FPE_FLTOVF;
+ else if (fpc & 0x1000) /* underflow */
+ si.si_code = FPE_FLTUND;
+ else if (fpc & 0x0800) /* inexact */
+ si.si_code = FPE_FLTRES;
+ }
+ current->thread.ieee_instruction_pointer = (addr_t) location;
+ do_trap(interruption_code, SIGFPE,
+ "floating point exception", regs, &si);
+}
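The data-exception code (DXC) in the FPC drives the si_code choice. Restated
as a standalone helper mirroring the tests above (a sketch for reference, not
part of the patch):

    static int dxc_to_si_code(int fpc)
    {
            if (fpc & 0x00000300)
                    return 0;               /* DXC bits 6-7 set: not IEEE */
            if (fpc & 0x8000) return FPE_FLTINV;    /* invalid operation */
            if (fpc & 0x4000) return FPE_FLTDIV;    /* divide by zero */
            if (fpc & 0x2000) return FPE_FLTOVF;    /* overflow */
            if (fpc & 0x1000) return FPE_FLTUND;    /* underflow */
            if (fpc & 0x0800) return FPE_FLTRES;    /* inexact */
            return 0;
    }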
asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
{
#endif
else
signal = SIGILL;
- if (signal == SIGFPE) {
- current->thread.ieee_instruction_pointer = (addr_t) location;
- do_trap(interruption_code, signal,
- "floating point exception", regs, NULL);
- } else if (signal)
+ if (signal == SIGFPE)
+ do_fp_trap(regs, location,
+ current->thread.fp_regs.fpc, interruption_code);
+ else if (signal)
do_trap(interruption_code, signal,
"illegal operation", regs, NULL);
}
__u16 *location = NULL;
int signal = 0;
- location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
+ location = (__u16 *) get_check_address(regs);
/*
* We got all needed information from the lowcore and can
}
} else
signal = SIGILL;
- if (signal == SIGFPE) {
- current->thread.ieee_instruction_pointer = (addr_t) location;
- do_trap(interruption_code, signal,
- "floating point exception", regs, NULL);
- } else if (signal)
- do_trap(interruption_code, signal,
- "specification exception", regs, NULL);
+ if (signal == SIGFPE)
+ do_fp_trap(regs, location,
+ current->thread.fp_regs.fpc, interruption_code);
+ else if (signal) {
+ siginfo_t info;
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPN;
+ info.si_addr = location;
+ do_trap(interruption_code, signal,
+ "specification exception", regs, &info);
+ }
}
#else
-DO_ERROR(SIGILL, "specification exception", specification_exception)
+DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
+ ILL_ILLOPN, get_check_address(regs));
#endif
asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
{
- __u8 opcode[6];
__u16 *location;
int signal = 0;
- location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
+ location = (__u16 *) get_check_address(regs);
/*
* We got all needed information from the lowcore and can
#ifdef CONFIG_MATHEMU
else if (regs->psw.mask & PSW_PROBLEM_STATE) {
+ __u8 opcode[6];
get_user(*((__u16 *) opcode), location);
switch (opcode[0]) {
case 0x28: /* LDR Rx,Ry */
signal = SIGFPE;
else
signal = SIGILL;
- if (signal == SIGFPE) {
- current->thread.ieee_instruction_pointer = (addr_t) location;
- do_trap(interruption_code, signal,
- "floating point exception", regs, NULL);
- } else if (signal)
- do_trap(interruption_code, signal,
- "data exception", regs, NULL);
+ if (signal == SIGFPE)
+ do_fp_trap(regs, location,
+ current->thread.fp_regs.fpc, interruption_code);
+ else if (signal) {
+ siginfo_t info;
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPN;
+ info.si_addr = location;
+ do_trap(interruption_code, signal,
+ "data exception", regs, &info);
+ }
}
pgm_check_table[1] = &illegal_op;
pgm_check_table[2] = &privileged_op;
pgm_check_table[3] = &execute_exception;
- pgm_check_table[4] = &do_page_fault;
+ pgm_check_table[4] = &do_protection_exception;
pgm_check_table[5] = &addressing_exception;
pgm_check_table[6] = &specification_exception;
pgm_check_table[7] = &data_exception;
pgm_check_table[9] = ÷_exception;
- pgm_check_table[0x10] = &do_page_fault;
- pgm_check_table[0x11] = &do_page_fault;
+ pgm_check_table[0x10] = &do_segment_exception;
+ pgm_check_table[0x11] = &do_page_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
pgm_check_table[0x14] = &do_pseudo_page_fault;
#ifdef CONFIG_PFAULT
if (MACHINE_IS_VM) {
/* request the 0x2603 external interrupt */
- if (register_external_interrupt(0x2603, pfault_interrupt) != 0)
+ if (register_early_external_interrupt(0x2603, pfault_interrupt,
+ &ext_int_pfault) != 0)
panic("Couldn't request external interrupt 0x2603");
/*
* First try to get pfault pseudo page faults going.
*/
if (pfault_init() != 0) {
/* Tough luck, no pfault. */
- unregister_external_interrupt(0x2603,
- pfault_interrupt);
+ unregister_early_external_interrupt(0x2603,
+ pfault_interrupt,
+ &ext_int_pfault);
cpcmd("SET PAGEX ON", NULL, 0);
}
}
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
- * These functions have a non-standard call interface
+ * These functions have a standard call interface
*/
#include <asm/lowcore.h>
.text
.align 4
- .globl __copy_from_user_fixup
-__copy_from_user_fixup:
- l 1,__LC_PGM_OLD_PSW+4
- sll 4,1
- srl 4,1
-0: lhi 3,-4096
- sll 3,1
- srl 3,1
- n 3,__LC_TRANS_EXC_ADDR
- sr 3,4
- bm 4(1)
-1: mvcle 2,4,0
- b 4(1)
+ .globl __copy_from_user_asm
+__copy_from_user_asm:
+ lr %r5,%r3
+ sacf 512
+0: mvcle %r2,%r4,0
+ jo 0b
+1: sacf 0
+ lr %r2,%r5
+ br %r14
+2: sll %r4,1
+ srl %r4,1
+ lhi %r3,-4096
+ sll %r3,1
+ srl %r3,1
+ n %r3,__LC_TRANS_EXC_ADDR
+ sr %r3,%r4
+ jm 1b
+ j 0b
.section __ex_table,"a"
- .long 1b,0b
+ .long 0b,2b
.previous
.align 4
.text
- .globl __copy_to_user_fixup
-__copy_to_user_fixup:
- l 1,__LC_PGM_OLD_PSW+4
- sll 4,1
- srl 4,1
-0: lhi 5,-4096
- sll 5,1
- srl 5,1
- n 5,__LC_TRANS_EXC_ADDR
- sr 5,4
- bm 4(1)
-1: mvcle 4,2,0
- b 4(1)
+ .globl __copy_to_user_asm
+__copy_to_user_asm:
+ lr %r5,%r3
+ sacf 512
+0: mvcle %r4,%r2,0
+ jo 0b
+1: sacf 0
+ lr %r2,%r3
+ br %r14
+2: sll %r4,1
+ srl %r4,1
+ lhi %r5,-4096
+ sll %r5,1
+ srl %r5,1
+ n %r5,__LC_TRANS_EXC_ADDR
+ sr %r5,%r4
+ jm 1b
+ j 0b
.section __ex_table,"a"
- .long 1b,0b
+ .long 0b,2b
+ .previous
+
+ .align 4
+ .text
+ .globl __clear_user_asm
+__clear_user_asm:
+ lr %r4,%r2
+ lr %r5,%r3
+ sr %r2,%r2
+ sr %r3,%r3
+ sacf 512
+0: mvcle %r4,%r2,0
+ jo 0b
+1: sacf 0
+ lr %r2,%r3
+ br %r14
+2: sll %r4,1
+ srl %r4,1
+ lhi %r5,-4096
+ sll %r5,1
+ srl %r5,1
+ n %r5,__LC_TRANS_EXC_ADDR
+ sr %r5,%r4
+ jm 1b
+ j 0b
+ .section __ex_table,"a"
+ .long 0b,2b
.previous
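The rewritten primitives are ordinary C-callable routines: sacf 512 switches
to secondary-space mode so MVCLE can address user memory directly, the
exception-table entry covers the MVCLE itself, and the fixup at label 2 trims
the length to the faulting page boundary before either resuming or giving up.
Reading the register use (r2/r3/r4 carry the first three arguments), the
C-level view is roughly the following; the exact prototypes in the headers may
differ:

    /* Inferred from the register usage above; each routine appears
     * to return the residual byte count (0 on full success). */
    extern unsigned long __copy_from_user_asm(void *to, unsigned long n,
                                              const void *from);
    extern unsigned long __copy_to_user_asm(const void *from, unsigned long n,
                                            void *to);
    extern unsigned long __clear_user_asm(void *to, unsigned long n);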
return SIGSEGV; \
} while (0)
-static void display_emulation_not_implemented(char *instr)
+static void display_emulation_not_implemented(struct pt_regs *regs, char *instr)
{
- struct pt_regs *regs;
__u16 *location;
if(sysctl_ieee_emulation_warnings)
#endif
{
- regs = current->thread.regs;
location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
printk("%s ieee fpu instruction not emulated "
"process name: %s pid: %d \n",
}
}
-static inline void emu_set_CC (int cc)
+static inline void emu_set_CC (struct pt_regs *regs, int cc)
{
- current->thread.regs->psw.mask =
- (current->thread.regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12);
+ regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12);
}
/*
* 2 : Result is greater than zero
* 3 : Result is NaN or INF
*/
-static inline void emu_set_CC_cs(int class, int sign)
+static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign)
{
switch (class) {
case FP_CLS_NORMAL:
case FP_CLS_INF:
- emu_set_CC(sign ? 1 : 2);
+ emu_set_CC(regs, sign ? 1 : 2);
break;
case FP_CLS_ZERO:
- emu_set_CC(0);
+ emu_set_CC(regs, 0);
break;
case FP_CLS_NAN:
- emu_set_CC(3);
+ emu_set_CC(regs, 3);
break;
}
}
/* Add long double */
-static int emu_axbr (int rx, int ry) {
+static int emu_axbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
FP_PACK_QP(&cvt.ld, QR);
current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(QR_c, QR_s);
+ emu_set_CC_cs(regs, QR_c, QR_s);
return _fex;
}
/* Add double */
-static int emu_adbr (int rx, int ry) {
+static int emu_adbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d);
FP_ADD_D(DR, DA, DB);
FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Add double */
-static int emu_adb (int rx, double *val) {
+static int emu_adb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DB, val);
FP_ADD_D(DR, DA, DB);
FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Add float */
-static int emu_aebr (int rx, int ry) {
+static int emu_aebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f);
FP_ADD_S(SR, SA, SB);
FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Add float */
-static int emu_aeb (int rx, float *val) {
+static int emu_aeb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SB, val);
FP_ADD_S(SR, SA, SB);
FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Compare long double */
-static int emu_cxbr (int rx, int ry) {
+static int emu_cxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QB);
mathemu_ldcv cvt;
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unorderded
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
return 0;
}
/* Compare double */
-static int emu_cdbr (int rx, int ry) {
+static int emu_cdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DB);
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
return 0;
}
/* Compare double */
-static int emu_cdb (int rx, double *val) {
+static int emu_cdb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DB);
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
return 0;
}
/* Compare float */
-static int emu_cebr (int rx, int ry) {
+static int emu_cebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SB);
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
return 0;
}
/* Compare float */
-static int emu_ceb (int rx, float *val) {
+static int emu_ceb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SB);
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
return 0;
}
/* Compare and signal long double */
-static int emu_kxbr (int rx, int ry) {
+static int emu_kxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QB);
FP_DECL_EX;
mathemu_ldcv cvt;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
if (IR == 3)
FP_SET_EXCEPTION (FP_EX_INVALID);
return _fex;
}
/* Compare and signal double */
-static int emu_kdbr (int rx, int ry) {
+static int emu_kdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DB);
FP_DECL_EX;
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
if (IR == 3)
FP_SET_EXCEPTION (FP_EX_INVALID);
return _fex;
}
/* Compare and signal double */
-static int emu_kdb (int rx, double *val) {
+static int emu_kdb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DB);
FP_DECL_EX;
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
if (IR == 3)
FP_SET_EXCEPTION (FP_EX_INVALID);
return _fex;
}
/* Compare and signal float */
-static int emu_kebr (int rx, int ry) {
+static int emu_kebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SB);
FP_DECL_EX;
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
if (IR == 3)
FP_SET_EXCEPTION (FP_EX_INVALID);
return _fex;
}
/* Compare and signal float */
-static int emu_keb (int rx, float *val) {
+static int emu_keb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SB);
FP_DECL_EX;
int IR;
* IR == -1 if DA < DB, IR == 0 if DA == DB,
* IR == 1 if DA > DB and IR == 3 if unordered
*/
- emu_set_CC((IR == -1) ? 1 : (IR == 1) ? 2 : IR);
+ emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
if (IR == 3)
FP_SET_EXCEPTION (FP_EX_INVALID);
return _fex;
}
/* Convert from fixed long double */
-static int emu_cxfbr (int rx, int ry) {
+static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
int mode;
mode = current->thread.fp_regs.fpc & 3;
- si = current->thread.regs->gprs[ry];
+ si = regs->gprs[ry];
FP_FROM_INT_Q(QR, si, 32, int);
FP_PACK_QP(&cvt.ld, QR);
current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
}
/* Convert from fixed double */
-static int emu_cdfbr (int rx, int ry) {
+static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DR);
FP_DECL_EX;
__s32 si;
int mode;
mode = current->thread.fp_regs.fpc & 3;
- si = current->thread.regs->gprs[ry];
+ si = regs->gprs[ry];
FP_FROM_INT_D(DR, si, 32, int);
FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR);
return _fex;
}
/* Convert from fixed float */
-static int emu_cefbr (int rx, int ry) {
+static int emu_cefbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SR);
FP_DECL_EX;
__s32 si;
int mode;
mode = current->thread.fp_regs.fpc & 3;
- si = current->thread.regs->gprs[ry];
+ si = regs->gprs[ry];
FP_FROM_INT_S(SR, si, 32, int);
FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
return _fex;
}
/* Convert to fixed long double */
-static int emu_cfxbr (int rx, int ry, int mask) {
+static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) {
FP_DECL_Q(QA);
FP_DECL_EX;
mathemu_ldcv cvt;
cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
FP_UNPACK_QP(QA, &cvt.ld);
FP_TO_INT_ROUND_Q(si, QA, 32, 1);
- current->thread.regs->gprs[rx] = si;
- emu_set_CC_cs(QA_c, QA_s);
+ regs->gprs[rx] = si;
+ emu_set_CC_cs(regs, QA_c, QA_s);
return _fex;
}
/* Convert to fixed double */
-static int emu_cfdbr (int rx, int ry, int mask) {
+static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) {
FP_DECL_D(DA);
FP_DECL_EX;
__s32 si;
mode = mask - 4;
FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
FP_TO_INT_ROUND_D(si, DA, 32, 1);
- current->thread.regs->gprs[rx] = si;
- emu_set_CC_cs(DA_c, DA_s);
+ regs->gprs[rx] = si;
+ emu_set_CC_cs(regs, DA_c, DA_s);
return _fex;
}
/* Convert to fixed float */
-static int emu_cfebr (int rx, int ry, int mask) {
+static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) {
FP_DECL_S(SA);
FP_DECL_EX;
__s32 si;
mode = mask - 4;
FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
FP_TO_INT_ROUND_S(si, SA, 32, 1);
- current->thread.regs->gprs[rx] = si;
- emu_set_CC_cs(SA_c, SA_s);
+ regs->gprs[rx] = si;
+ emu_set_CC_cs(regs, SA_c, SA_s);
return _fex;
}
/* Divide long double */
-static int emu_dxbr (int rx, int ry) {
+static int emu_dxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Divide double */
-static int emu_ddbr (int rx, int ry) {
+static int emu_ddbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Divide double */
-static int emu_ddb (int rx, double *val) {
+static int emu_ddb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Divide float */
-static int emu_debr (int rx, int ry) {
+static int emu_debr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Divide float */
-static int emu_deb (int rx, float *val) {
+static int emu_deb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Divide to integer double */
-static int emu_didbr (int rx, int ry, int mask) {
- display_emulation_not_implemented("didbr");
+static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) {
+ display_emulation_not_implemented(regs, "didbr");
return 0;
}
/* Divide to integer float */
-static int emu_diebr (int rx, int ry, int mask) {
- display_emulation_not_implemented("diebr");
+static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) {
+ display_emulation_not_implemented(regs, "diebr");
return 0;
}
/* Extract fpc */
-static int emu_efpc (int rx, int ry) {
- current->thread.regs->gprs[rx] = current->thread.fp_regs.fpc;
+static int emu_efpc (struct pt_regs *regs, int rx, int ry) {
+ regs->gprs[rx] = current->thread.fp_regs.fpc;
return 0;
}
/* Load and test long double */
-static int emu_ltxbr (int rx, int ry) {
+static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) {
s390_fp_regs *fp_regs = &current->thread.fp_regs;
mathemu_ldcv cvt;
FP_DECL_Q(QA);
FP_UNPACK_QP(QA, &cvt.ld);
fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui;
- emu_set_CC_cs(QA_c, QA_s);
+ emu_set_CC_cs(regs, QA_c, QA_s);
return _fex;
}
/* Load and test double */
-static int emu_ltdbr (int rx, int ry) {
+static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) {
s390_fp_regs *fp_regs = &current->thread.fp_regs;
FP_DECL_D(DA);
FP_DECL_EX;
FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- emu_set_CC_cs(DA_c, DA_s);
+ emu_set_CC_cs(regs, DA_c, DA_s);
return _fex;
}
/* Load and test double */
-static int emu_ltebr (int rx, int ry) {
+static int emu_ltebr (struct pt_regs *regs, int rx, int ry) {
s390_fp_regs *fp_regs = &current->thread.fp_regs;
FP_DECL_S(SA);
FP_DECL_EX;
FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- emu_set_CC_cs(SA_c, SA_s);
+ emu_set_CC_cs(regs, SA_c, SA_s);
return _fex;
}
/* Load complement long double */
-static int emu_lcxbr (int rx, int ry) {
+static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
FP_PACK_QP(&cvt.ld, QR);
current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(QR_c, QR_s);
+ emu_set_CC_cs(regs, QR_c, QR_s);
return _fex;
}
/* Load complement double */
-static int emu_lcdbr (int rx, int ry) {
+static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
FP_NEG_D(DR, DA);
FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Load complement float */
-static int emu_lcebr (int rx, int ry) {
+static int emu_lcebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
FP_NEG_S(SR, SA);
FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Load floating point integer long double */
-static int emu_fixbr (int rx, int ry, int mask) {
+static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) {
s390_fp_regs *fp_regs = &current->thread.fp_regs;
FP_DECL_Q(QA);
FP_DECL_EX;
}
/* Load floating point integer double */
-static int emu_fidbr (int rx, int ry, int mask) {
+static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) {
/* FIXME: rounding mode !! */
s390_fp_regs *fp_regs = &current->thread.fp_regs;
FP_DECL_D(DA);
}
/* Load floating point integer float */
-static int emu_fiebr (int rx, int ry, int mask) {
+static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) {
s390_fp_regs *fp_regs = &current->thread.fp_regs;
FP_DECL_S(SA);
FP_DECL_EX;
}
/* Load lengthened double to long double */
-static int emu_lxdbr (int rx, int ry) {
+static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Load lengthened double to long double */
-static int emu_lxdb (int rx, double *val) {
+static int emu_lxdb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Load lengthened float to long double */
-static int emu_lxebr (int rx, int ry) {
+static int emu_lxebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Load lengthened float to long double */
-static int emu_lxeb (int rx, float *val) {
+static int emu_lxeb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Load lengthened float to double */
-static int emu_ldebr (int rx, int ry) {
+static int emu_ldebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Load lengthened float to double */
-static int emu_ldeb (int rx, float *val) {
+static int emu_ldeb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Load negative long double */
-static int emu_lnxbr (int rx, int ry) {
+static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
current->thread.fp_regs.fprs[rx+2].ui =
current->thread.fp_regs.fprs[ry+2].ui;
}
- emu_set_CC_cs(QR_c, QR_s);
+ emu_set_CC_cs(regs, QR_c, QR_s);
return _fex;
}
/* Load negative double */
-static int emu_lndbr (int rx, int ry) {
+static int emu_lndbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
} else
current->thread.fp_regs.fprs[rx].ui =
current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Load negative float */
-static int emu_lnebr (int rx, int ry) {
+static int emu_lnebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
} else
current->thread.fp_regs.fprs[rx].ui =
current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Load positive long double */
-static int emu_lpxbr (int rx, int ry) {
+static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
current->thread.fp_regs.fprs[rx+2].ui =
current->thread.fp_regs.fprs[ry+2].ui;
}
- emu_set_CC_cs(QR_c, QR_s);
+ emu_set_CC_cs(regs, QR_c, QR_s);
return _fex;
}
/* Load positive double */
-static int emu_lpdbr (int rx, int ry) {
+static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
} else
current->thread.fp_regs.fprs[rx].ui =
current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Load positive float */
-static int emu_lpebr (int rx, int ry) {
+static int emu_lpebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
} else
current->thread.fp_regs.fprs[rx].ui =
current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Load rounded long double to double */
-static int emu_ldxbr (int rx, int ry) {
+static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_D(DR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Load rounded long double to float */
-static int emu_lexbr (int rx, int ry) {
+static int emu_lexbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_S(SR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Load rounded double to float */
-static int emu_ledbr (int rx, int ry) {
+static int emu_ledbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Multiply long double */
-static int emu_mxbr (int rx, int ry) {
+static int emu_mxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Multiply double */
-static int emu_mdbr (int rx, int ry) {
+static int emu_mdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply double */
-static int emu_mdb (int rx, double *val) {
+static int emu_mdb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply double to long double */
-static int emu_mxdbr (int rx, int ry) {
+static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Multiply double to long double */
-static int emu_mxdb (int rx, long double *val) {
+static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) {
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
}
/* Multiply float */
-static int emu_meebr (int rx, int ry) {
+static int emu_meebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Multiply float */
-static int emu_meeb (int rx, float *val) {
+static int emu_meeb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Multiply float to double */
-static int emu_mdebr (int rx, int ry) {
+static int emu_mdebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply float to double */
-static int emu_mdeb (int rx, float *val) {
+static int emu_mdeb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply and add double */
-static int emu_madbr (int rx, int ry, int rz) {
+static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply and add double */
-static int emu_madb (int rx, double *val, int rz) {
+static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply and add float */
-static int emu_maebr (int rx, int ry, int rz) {
+static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Multiply and add float */
-static int emu_maeb (int rx, float *val, int rz) {
+static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Multiply and subtract double */
-static int emu_msdbr (int rx, int ry, int rz) {
+static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply and subtract double */
-static int emu_msdb (int rx, double *val, int rz) {
+static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
}
/* Multiply and subtract float */
-static int emu_msebr (int rx, int ry, int rz) {
+static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Multiply and subtract float */
-static int emu_mseb (int rx, float *val, int rz) {
+static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
}
/* Set floating point control word */
-static int emu_sfpc (int rx, int ry) {
+static int emu_sfpc (struct pt_regs *regs, int rx, int ry) {
__u32 temp;
- temp = current->thread.regs->gprs[rx];
+ temp = regs->gprs[rx];
if ((temp & ~FPC_VALID_MASK) != 0)
return SIGILL;
current->thread.fp_regs.fpc = temp;
}
/* Square root long double */
-static int emu_sqxbr (int rx, int ry) {
+static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
FP_PACK_QP(&cvt.ld, QR);
current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(QR_c, QR_s);
+ emu_set_CC_cs(regs, QR_c, QR_s);
return _fex;
}
/* Square root double */
-static int emu_sqdbr (int rx, int ry) {
+static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
FP_SQRT_D(DR, DA);
FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Square root double */
-static int emu_sqdb (int rx, double *val) {
+static int emu_sqdb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DA, val);
FP_SQRT_D(DR, DA);
FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Square root float */
-static int emu_sqebr (int rx, int ry) {
+static int emu_sqebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
FP_SQRT_S(SR, SA);
FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Square root float */
-static int emu_sqeb (int rx, float *val) {
+static int emu_sqeb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SA, val);
FP_SQRT_S(SR, SA);
FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Subtract long double */
-static int emu_sxbr (int rx, int ry) {
+static int emu_sxbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
FP_DECL_EX;
mathemu_ldcv cvt;
FP_PACK_QP(&cvt.ld, QR);
current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(QR_c, QR_s);
+ emu_set_CC_cs(regs, QR_c, QR_s);
return _fex;
}
/* Subtract double */
-static int emu_sdbr (int rx, int ry) {
+static int emu_sdbr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
FP_SUB_D(DR, DA, DB);
FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Subtract double */
-static int emu_sdb (int rx, double *val) {
+static int emu_sdb (struct pt_regs *regs, int rx, double *val) {
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_EX;
int mode;
FP_UNPACK_DP(DB, val);
FP_SUB_D(DR, DA, DB);
FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(DR_c, DR_s);
+ emu_set_CC_cs(regs, DR_c, DR_s);
return _fex;
}
/* Subtract float */
-static int emu_sebr (int rx, int ry) {
+static int emu_sebr (struct pt_regs *regs, int rx, int ry) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
FP_SUB_S(SR, SA, SB);
FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Subtract float */
-static int emu_seb (int rx, float *val) {
+static int emu_seb (struct pt_regs *regs, int rx, float *val) {
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_EX;
int mode;
FP_UNPACK_SP(SB, val);
FP_SUB_S(SR, SA, SB);
FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(SR_c, SR_s);
+ emu_set_CC_cs(regs, SR_c, SR_s);
return _fex;
}
/* Test data class long double */
-static int emu_tcxb (int rx, long val) {
+static int emu_tcxb (struct pt_regs *regs, int rx, long val) {
FP_DECL_Q(QA);
mathemu_ldcv cvt;
int bit;
}
if (!QA_s)
bit++;
- emu_set_CC(((__u32) val >> bit) & 1);
+ emu_set_CC(regs, ((__u32) val >> bit) & 1);
return 0;
}
/* Test data class double */
-static int emu_tcdb (int rx, long val) {
+static int emu_tcdb (struct pt_regs *regs, int rx, long val) {
FP_DECL_D(DA);
int bit;
}
if (!DA_s)
bit++;
- emu_set_CC(((__u32) val >> bit) & 1);
+ emu_set_CC(regs, ((__u32) val >> bit) & 1);
return 0;
}
/* Test data class float */
-static int emu_tceb (int rx, long val) {
+static int emu_tceb (struct pt_regs *regs, int rx, long val) {
FP_DECL_S(SA);
int bit;
}
if (!SA_s)
bit++;
- emu_set_CC(((__u32) val >> bit) & 1);
+ emu_set_CC(regs, ((__u32) val >> bit) & 1);
return 0;
}
emu_store_regd(opcode[3] & 15);
emu_store_regd((opcode[3] & 15) + 2);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+	_fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(((opcode[3] >> 4) & 15) + 2);
emu_load_regd(opcode[3] & 15);
emu_store_regd((opcode[3] >> 4) & 15);
emu_store_regd(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(opcode[3] & 15);
break;
emu_store_rege((opcode[3] >> 4) & 15);
emu_store_rege(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_rege((opcode[3] >> 4) & 15);
emu_load_rege(opcode[3] & 15);
break;
emu_store_regd(opcode[3] & 15);
emu_store_regd((opcode[3] & 15) + 2);
/* call the emulation function */
- _fex = ((int (*)(int, int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(((opcode[3] >> 4) & 15) + 2);
emu_load_regd(opcode[3] & 15);
emu_store_regd((opcode[3] >> 4) & 15);
emu_store_regd(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
emu_load_regd((opcode[2] >> 4) & 15);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(opcode[3] & 15);
emu_store_rege((opcode[3] >> 4) & 15);
emu_store_rege(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
emu_load_rege((opcode[2] >> 4) & 15);
emu_load_rege((opcode[3] >> 4) & 15);
emu_load_rege(opcode[3] & 15);
/* call the emulation function */
if (opcode[3] & 0x20)
return SIGILL;
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(((opcode[3] >> 4) & 15) + 2);
break;
case 8: /* RRE format, cdfbr instruction */
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
break;
case 9: /* RRE format, cefbr instruction */
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_rege((opcode[3] >> 4) & 15);
break;
case 10: /* RRF format, cfxbr instruction */
emu_store_regd(opcode[3] & 15);
emu_store_regd((opcode[3] & 15) + 2);
/* call the emulation function */
- _fex = ((int (*)(int, int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
break;
case 11: /* RRF format, cfdbr instruction */
if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
return SIGILL;
emu_store_regd(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
break;
case 12: /* RRF format, cfebr instruction */
if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
return SIGILL;
emu_store_rege(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
break;
case 13: /* RRE format, lxdbr & mxdbr instruction */
/* double store but long double load */
emu_store_regd((opcode[3] >> 4) & 15);
emu_store_regd(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(((opcode[3] >> 4) & 15) + 2);
break;
emu_store_rege((opcode[3] >> 4) & 15);
emu_store_rege(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
emu_load_regd(((opcode[3] >> 4) & 15) + 2);
break;
emu_store_rege((opcode[3] >> 4) & 15);
emu_store_rege(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
break;
case 16: /* RRE format, ldxbr instruction */
emu_store_regd(opcode[3] & 15);
emu_store_regd((opcode[3] & 15) + 2);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_regd((opcode[3] >> 4) & 15);
break;
case 17: /* RRE format, lexbr instruction */
emu_store_regd(opcode[3] & 15);
emu_store_regd((opcode[3] & 15) + 2);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_rege((opcode[3] >> 4) & 15);
break;
case 18: /* RRE format, ledbr instruction */
/* double store but float load */
emu_store_regd(opcode[3] & 15);
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
emu_load_rege((opcode[3] >> 4) & 15);
break;
case 19: /* RRE format, efpc & sfpc instruction */
/* call the emulation function */
- _fex = ((int (*)(int, int)) jump_table[opcode[1]])
- (opcode[3] >> 4, opcode[3] & 15);
+ _fex = ((int (*)(struct pt_regs *, int, int))
+ jump_table[opcode[1]])
+ (regs, opcode[3] >> 4, opcode[3] & 15);
break;
default: /* invalid operation */
return SIGILL;
dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_copy_from_user(&temp, dxb, 8);
/* call the emulation function */
- _fex = ((int (*)(int, double *)) jump_table[opcode[5]])
- (opcode[1] >> 4, (double *) &temp);
+ _fex = ((int (*)(struct pt_regs *, int, double *))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (double *) &temp);
emu_load_regd((opcode[1] >> 4) & 15);
break;
}
dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_get_user(temp, dxb);
/* call the emulation function */
- _fex = ((int (*)(int, float *)) jump_table[opcode[5]])
- (opcode[1] >> 4, (float *) &temp);
+ _fex = ((int (*)(struct pt_regs *, int, float *))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (float *) &temp);
emu_load_rege((opcode[1] >> 4) & 15);
break;
}
dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_copy_from_user(&temp, dxb, 8);
/* call the emulation function */
- _fex = ((int (*)(int, double *, int)) jump_table[opcode[5]])
- (opcode[1] >> 4, (double *) &temp, opcode[4] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, double *, int))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4);
emu_load_regd((opcode[1] >> 4) & 15);
break;
}
dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_get_user(temp, dxb);
/* call the emulation function */
- _fex = ((int (*)(int, float *, int)) jump_table[opcode[5]])
- (opcode[1] >> 4, (float *) &temp, opcode[4] >> 4);
+ _fex = ((int (*)(struct pt_regs *, int, float *, int))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4);
emu_load_rege((opcode[4] >> 4) & 15);
break;
}
dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_copy_from_user(&temp, dxb, 8);
/* call the emulation function */
- _fex = ((int (*)(int, double *)) jump_table[opcode[5]])
- (opcode[1] >> 4, (double *) &temp);
+ _fex = ((int (*)(struct pt_regs *, int, double *))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (double *) &temp);
emu_load_regd((opcode[1] >> 4) & 15);
emu_load_regd(((opcode[1] >> 4) & 15) + 2);
break;
dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_get_user(temp, dxb);
/* call the emulation function */
- _fex = ((int (*)(int, float *)) jump_table[opcode[5]])
- (opcode[1] >> 4, (float *) &temp);
+ _fex = ((int (*)(struct pt_regs *, int, float *))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (float *) &temp);
emu_load_regd((opcode[1] >> 4) & 15);
break;
}
dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
mathemu_get_user(temp, dxb);
/* call the emulation function */
- _fex = ((int (*)(int, float *)) jump_table[opcode[5]])
- (opcode[1] >> 4, (float *) &temp);
+ _fex = ((int (*)(struct pt_regs *, int, float *))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, (float *) &temp);
emu_load_regd((opcode[1] >> 4) & 15);
emu_load_regd(((opcode[1] >> 4) & 15) + 2);
break;
opc = *((__u32 *) opcode);
dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
/* call the emulation function */
- _fex = ((int (*)(int, long)) jump_table[opcode[5]])
- (opcode[1] >> 4, dxb);
+ _fex = ((int (*)(struct pt_regs *, int, long))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, dxb);
break;
}
case 9: /* RXE format, RX address used as int value */ {
opc = *((__u32 *) opcode);
dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
/* call the emulation function */
- _fex = ((int (*)(int, long)) jump_table[opcode[5]])
- (opcode[1] >> 4, dxb);
+ _fex = ((int (*)(struct pt_regs *, int, long))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, dxb);
break;
}
case 10: /* RXE format, RX address used as int value */ {
opc = *((__u32 *) opcode);
dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
/* call the emulation function */
- _fex = ((int (*)(int, long)) jump_table[opcode[5]])
- (opcode[1] >> 4, dxb);
+ _fex = ((int (*)(struct pt_regs *, int, long))
+ jump_table[opcode[5]])
+ (regs, opcode[1] >> 4, dxb);
break;
}
default: /* invalid operation */
search_exception_table(unsigned long addr)
{
unsigned long ret = 0;
- unsigned long flags;
#ifndef CONFIG_MODULES
addr &= 0x7fffffff; /* remove amode bit from address */
if (ret) ret = FIX_PSW(ret);
return ret;
#else
+ unsigned long flags;
/* The kernel is the last "module" -- no need to treat it special. */
struct module *mp;
addr &= 0x7fffffff; /* remove amode bit from address */
#endif
extern void die(const char *,struct pt_regs *,long);
-static void force_sigsegv(struct task_struct *tsk, int code, void *address);
extern spinlock_t timerlist_lock;
}
}
+/*
+ * Check which address space is addressed by the access
+ * register in S390_lowcore.exc_access_id.
+ * Returns 1 for user space and 0 for kernel space.
+ */
+static int __check_access_register(struct pt_regs *regs, int error_code)
+{
+ int areg = S390_lowcore.exc_access_id;
+
+ if (areg == 0)
+ /* Access via access register 0 -> kernel address */
+ return 0;
+ if (regs && areg < NUM_ACRS && regs->acrs[areg] <= 1)
+ /*
+ * access register contains 0 -> kernel address,
+ * access register contains 1 -> user space address
+ */
+ return regs->acrs[areg];
+
+ /* Something unhealthy was done with the access registers... */
+ die("page fault via unknown access register", regs, error_code);
+ do_exit(SIGKILL);
+ return 0;
+}
+
+/*
+ * Check which address space the address belongs to.
+ * Returns 1 for user space and 0 for kernel space.
+ */
+static inline int check_user_space(struct pt_regs *regs, int error_code)
+{
+ /*
+ * The lowest two bits of S390_lowcore.trans_exc_code indicate
+ * which paging table was used:
+ * 0: Primary Segment Table Descriptor
+ * 1: STD determined via access register
+ * 2: Secondary Segment Table Descriptor
+ * 3: Home Segment Table Descriptor
+ */
+ int descriptor = S390_lowcore.trans_exc_code & 3;
+ if (descriptor == 1)
+ return __check_access_register(regs, error_code);
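+	/* Descriptor 0 (primary) is kernel space; 2 (secondary) and
+	   3 (home) are user space, so bit 1 gives the answer. */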
+ return descriptor >> 1;
+}
+
+/*
+ * Send SIGSEGV to task. This is an external routine
+ * to keep the stack usage of do_page_fault small.
+ */
+static void force_sigsegv(struct pt_regs *regs, unsigned long error_code,
+ int si_code, unsigned long address)
+{
+ struct siginfo si;
+
+#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
+#if defined(CONFIG_SYSCTL)
+ if (sysctl_userprocess_debug)
+#endif
+ {
+ printk("User process fault: interruption code 0x%lX\n",
+ error_code);
+ printk("failing address: %lX\n", address);
+ show_regs(regs);
+ }
+#endif
+ si.si_signo = SIGSEGV;
+ si.si_code = si_code;
+ si.si_addr = (void *) address;
+ force_sig_info(SIGSEGV, &si, current);
+}
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* error_code:
- * ****0004 Protection -> Write-Protection (suprression)
- * ****0010 Segment translation -> Not present (nullification)
- * ****0011 Page translation -> Not present (nullification)
+ *   04       Protection           ->  Write-Protection  (suppression)
+ * 10 Segment translation -> Not present (nullification)
+ * 11 Page translation -> Not present (nullification)
*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
unsigned long address;
+ int user_address;
unsigned long fixup;
- int write;
int si_code = SEGV_MAPERR;
- int kernel_address = 0;
tsk = current;
mm = tsk->mm;
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
- if ((error_code & 0xff) == 4 && !(S390_lowcore.trans_exc_code & 4)) {
+ if (error_code == 4 && !(S390_lowcore.trans_exc_code & 4)) {
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
address = 0;
- kernel_address = 1;
+ user_address = 0;
goto no_context;
}
* more specific the segment and page table portion of
* the address
*/
-
address = S390_lowcore.trans_exc_code&0x7ffff000;
-
-
- /*
- * Check which address space the address belongs to
- */
- switch (S390_lowcore.trans_exc_code & 3)
- {
- case 0: /* Primary Segment Table Descriptor */
- kernel_address = 1;
- goto no_context;
-
- case 1: /* STD determined via access register */
- if (S390_lowcore.exc_access_id == 0)
- {
- kernel_address = 1;
- goto no_context;
- }
- if (regs && S390_lowcore.exc_access_id < NUM_ACRS)
- {
- if (regs->acrs[S390_lowcore.exc_access_id] == 0)
- {
- kernel_address = 1;
- goto no_context;
- }
- if (regs->acrs[S390_lowcore.exc_access_id] == 1)
- {
- /* user space address */
- break;
- }
- }
- die("page fault via unknown access register", regs, error_code);
- do_exit(SIGKILL);
- break;
-
- case 2: /* Secondary Segment Table Descriptor */
- case 3: /* Home Segment Table Descriptor */
- /* user space address */
- break;
- }
+ user_address = check_user_space(regs, error_code);
/*
- * Check whether we have a user MM in the first place.
+ * Verify that the fault happened in user space, that
+ * we are not in an interrupt and that there is a
+ * user context.
*/
- if (in_interrupt() || !mm || !(regs->psw.mask & _PSW_IO_MASK_BIT))
+ if (user_address == 0 || in_interrupt() || !mm)
goto no_context;
/*
* task's user address space, so we can switch on the
* interrupts again and then search the VMAs
*/
-
__sti();
down_read(&mm->mmap_sem);
* we can handle it..
*/
good_area:
- write = 0;
si_code = SEGV_ACCERR;
+ if (error_code != 4) {
+ /* page not present, check vm flags */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ }
- switch (error_code & 0xFF) {
- case 0x04: /* write, present*/
- write = 1;
- break;
- case 0x10: /* not present*/
- case 0x11: /* not present*/
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- break;
- default:
- printk("code should be 4, 10 or 11 (%lX) \n",error_code&0xFF);
- goto bad_area;
- }
-
- survive:
+survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- switch (handle_mm_fault(mm, vma, address, write)) {
+ switch (handle_mm_fault(mm, vma, address, error_code == 4)) {
case 1:
tsk->min_flt++;
break;
if (regs->psw.mask & PSW_PROBLEM_STATE) {
tsk->thread.prot_addr = address;
tsk->thread.trap_no = error_code;
-#ifndef CONFIG_SYSCTL
-#ifdef CONFIG_PROCESS_DEBUG
- printk("User process fault: interruption code 0x%lX\n",error_code);
- printk("failing address: %lX\n",address);
- show_regs(regs);
-#endif
-#else
- if (sysctl_userprocess_debug) {
- printk("User process fault: interruption code 0x%lX\n",
- error_code);
- printk("failing address: %lX\n", address);
- show_regs(regs);
- }
-#endif
-
- force_sigsegv(tsk, si_code, (void *)address);
+ force_sigsegv(regs, error_code, si_code, address);
return;
}
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
-
- if (kernel_address)
+ if (user_address == 0)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %08lx\n", address);
else
out_of_memory:
up_read(&mm->mmap_sem);
if (tsk->pid == 1) {
- tsk->policy |= SCHED_YIELD;
- schedule();
- down_read(&mm->mmap_sem);
+ yield();
goto survive;
}
printk("VM: killing process %s\n", tsk->comm);
goto no_context;
}
-/*
- * Send SIGSEGV to task. This is an external routine
- * to keep the stack usage of do_page_fault small.
- */
-static void force_sigsegv(struct task_struct *tsk, int code, void *address)
+void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
{
- struct siginfo si;
- si.si_signo = SIGSEGV;
- si.si_code = code;
- si.si_addr = address;
- force_sig_info(SIGSEGV, &si, tsk);
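+	/*
+	 * Protection exceptions are suppressing: the PSW already points
+	 * past the faulting instruction. Back it up by the instruction
+	 * length passed in the upper half of error_code before handling
+	 * it like the nullifying translation exceptions.
+	 */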
+ regs->psw.addr -= (error_code >> 16);
+ do_exception(regs, 4);
+}
+
+void do_segment_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ do_exception(regs, 0x10);
+}
+
+void do_page_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ do_exception(regs, 0x11);
}
typedef struct _pseudo_wait_t {
pseudo_wait_t wait_struct;
pseudo_wait_t *ptr, *last, *next;
unsigned long address;
- int kernel_address;
/*
* get the failing address
* while we are running disabled. VM will then swap
* in the page synchronously.
*/
- kernel_address = 0;
- switch (S390_lowcore.trans_exc_code & 3) {
- case 0: /* Primary Segment Table Descriptor */
- kernel_address = 1;
- break;
- case 1: /* STD determined via access register */
- if (S390_lowcore.exc_access_id == 0 ||
- regs->acrs[S390_lowcore.exc_access_id]==0)
- kernel_address = 1;
- break;
- case 2: /* Secondary Segment Table Descriptor */
- case 3: /* Home Segment Table Descriptor */
- break;
- }
- if (kernel_address)
+ if (check_user_space(regs, error_code) == 0)
/* dereference a virtual kernel address */
__asm__ __volatile__ (
" ic 0,0(%0)"
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
+#include <asm/tlbflush.h>
mmu_gather_t mmu_gathers[NR_CPUS];
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-int do_check_pgt_cache(int low, int high)
-{
- int freed = 0;
- if(pgtable_cache_size > high) {
- do {
- if(pgd_quicklist) {
- free_pgd_slow(get_pgd_fast());
- freed += 2;
- }
- if(pmd_quicklist) {
- pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
- freed++;
- }
- if(pte_quicklist) {
- pte_free_slow(pte_alloc_one_fast(NULL, 0));
- freed++;
- }
- } while(pgtable_cache_size > low);
- }
- return freed;
-}
-
void show_mem(void)
{
int i, total = 0, reserved = 0;
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
- printk("%ld pages in page table cache\n",pgtable_cache_size);
}
/* References to section boundaries */
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
if (address >= end)
BUG();
do {
- pte_t * pte = pte_alloc(&init_mm, pmd, address);
+ pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, flags);
*(.fixup)
*(.gnu.warning)
} = 0x0700
+
+ _etext = .; /* End of text section */
+
.rodata : { *(.rodata) }
.kstrtab : { *(.kstrtab) }
__ksymtab : { *(__ksymtab) }
__stop___ksymtab = .;
- __start___kallsyms = .; /* All kernel symbols */
- __kallsyms : { *(__kallsyms) }
- __stop___kallsyms = .;
-
. = ALIGN(1048576); /* VM shared segments are 1MB aligned */
- _etext = .; /* End of text section */
+ _eshared = .; /* End of shareable data */
.data : { /* Data */
*(.data)
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
- . = ALIGN(4096);
- __init_end = .;
-
+ . = ALIGN(256);
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
*(.initcall7.init)
}
__initcall_end = .;
+ . = ALIGN(256);
+ __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;
*(.fixup)
*(.gnu.warning)
} = 0x0700
+
+ _etext = .; /* End of text section */
+
.rodata : { *(.rodata) *(.rodata.*) }
.kstrtab : { *(.kstrtab) }
__ksymtab : { *(__ksymtab) }
__stop___ksymtab = .;
- __start___kallsyms = .; /* All kernel symbols */
- __kallsyms : { *(__kallsyms) }
- __stop___kallsyms = .;
-
- _etext = .; /* End of text section */
-
.data : { /* Data */
*(.data)
CONSTRUCTORS
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
- . = ALIGN(4096);
- __init_end = .;
-
+ . = ALIGN(256);
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
*(.initcall7.init)
}
__initcall_end = .;
+ . = ALIGN(256);
+ __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;
. = ALIGN(4096);
.data.page_aligned : { *(.data.idt) }
-
__bss_start = .; /* BSS */
.bss : {
*(.bss)
IPL device.
CONFIG_IPL_TAPE
- Select this option if you want to IPL the image from a Tape.
+ Select "tape" if you want to IPL the image from a Tape.
+
+ Select "vm_reader" if you are running under VM/ESA and want
+ to IPL the image from the emulated card reader.
CONFIG_FAST_IRQ
Select this option in order to get the interrupts processed faster
interrupts which will also be processed before leaving the interrupt
context. This speeds up the I/O a lot. Say "Y".
+CONFIG_MACHCHK_WARNING
+ Select this option if you want the machine check handler on IBM S/390 or
+ zSeries to process warning machine checks (e.g. on power failures).
+ If unsure, say "Y".
+
+CONFIG_CHSC
+ Select this option if you want the s390 common I/O layer to use information
+ obtained by channel subsystem calls. This will enable Linux to process link
+ failures and resource accessibility events. Moreover, if you have procfs
+ enabled, you'll be able to toggle chpids logically offline and online. Even
+ if you don't understand what this means, you should say "Y".
+
CONFIG_S390_SUPPORT
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
a debugging option; you probably do not want to set it unless you
are an S390 port maintainer.
+CONFIG_PFAULT
+  Select this option if you want to use PFAULT pseudo page fault
+ handling under VM. If running native or in LPAR, this option
+ has no effect. If your VM does not support PFAULT, PAGEEX
+ pseudo page fault handling will be used.
+ Note that VM 4.2 supports PFAULT but has a bug in its
+ implementation that causes some problems.
+  If you run Linux under a VM release other than VM 4.2, you should
+  select this option.
+
+CONFIG_SHARED_KERNEL
+  Select this option if you want to share the text segment of the
+ Linux kernel between different VM guests. This reduces memory
+ usage with lots of guests but greatly increases kernel size.
+ You should only select this option if you know what you are
+ doing and want to exploit this feature.
+
+CONFIG_QDIO
+ This driver provides the Queued Direct I/O base support for the
+ IBM S/390 (G5 and G6) and eServer zSeries (z800 and z900).
+
+ For details please refer to the documentation provided by IBM at
+ <http://www10.software.ibm.com/developerworks/opensource/linux390>
+
+ This driver is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called qdio.o. If you want to compile it as a
+ module, say M here and read <file:Documentation/modules.txt>.
+
+ If unsure, say Y.
+
+Performance statistics for QDIO base support
+CONFIG_QDIO_PERF_STATS
+ Say Y here to get performance statistics in /proc/qdio_perf
+
+ If unsure, say N.
OBJCOPY=$(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S
LDFLAGS=-e start
ifeq ($(CONFIG_SHARED_KERNEL),y)
-LINKFLAGS =-T $(TOPDIR)/arch/s390x/vmlinux-shared.lds $(LDFLAGS)
+ LINKSCRIPT := arch/s390x/vmlinux-shared.lds
else
-LINKFLAGS =-T $(TOPDIR)/arch/s390x/vmlinux.lds $(LDFLAGS)
+ LINKSCRIPT := arch/s390x/vmlinux.lds
endif
+LINKFLAGS =-T $(TOPDIR)/$(LINKSCRIPT) $(LDFLAGS)
MODFLAGS += -fpic
CFLAGS_PIPE := -pipe
HEAD := arch/s390x/kernel/head.o arch/s390x/kernel/init_task.o
-SUBDIRS := $(SUBDIRS) arch/s390x/mm arch/s390x/kernel arch/s390x/lib \
- drivers/s390
+SUBDIRS += arch/s390x/mm arch/s390x/kernel arch/s390x/lib drivers/s390
CORE_FILES := arch/s390x/mm/mm.o arch/s390x/kernel/kernel.o $(CORE_FILES)
DRIVERS := $(DRIVERS) drivers/s390/built-in.o
LIBS := $(TOPDIR)/arch/s390x/lib/lib.a $(LIBS) $(TOPDIR)/arch/s390x/lib/lib.a
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+vmlinux: $(LINKSCRIPT)
+
image: vmlinux
@$(MAKEBOOT) image
+install: vmlinux
+ @$(MAKEBOOT) BOOTIMAGE=image install
+
archclean:
@$(MAKEBOOT) clean
+ $(MAKE) -C arch/$(ARCH)/kernel clean
archmrproper:
O_TARGET :=
-include $(TOPDIR)/Rules.make
-
EXTRA_AFLAGS := -traditional
+include $(TOPDIR)/Rules.make
+
%.lnk: %.o
$(LD) -Ttext 0x0 -o $@ $<
clean:
rm -f image listing iplfba.boot ipleckd.boot ipldump.boot
+install: $(CONFIGURE) $(BOOTIMAGE)
+ sh -x ./install.sh $(KERNELRELEASE) $(BOOTIMAGE) $(TOPDIR)/System.map $(TOPDIR)/Kerntypes "$(INSTALL_PATH)"
--- /dev/null
+#!/bin/sh
+#
+# arch/s390x/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for s390 architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+#   $4 - kernel Kerntypes file
+#   $5 - default install path (blank if root directory)
+#
+
+# User may have a custom install script
+
+if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
+if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+
+# Default install - same as make zlilo
+
+if [ -f $5/vmlinuz ]; then
+	mv $5/vmlinuz $5/vmlinuz.old
+fi
+
+if [ -f $5/System.map ]; then
+	mv $5/System.map $5/System.old
+fi
+
+cat $2 > $5/vmlinuz
+cp $3 $5/System.map
define_bool CONFIG_ISA n
define_bool CONFIG_EISA n
define_bool CONFIG_MCA n
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
+define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
define_bool CONFIG_GENERIC_BUST_SPINLOCK n
mainmenu_name "Linux Kernel Configuration"
define_bool CONFIG_ARCH_S390X y
source init/Config.in
+
mainmenu_option next_comment
comment 'Processor type and features'
bool 'Symmetric multi-processing support' CONFIG_SMP
endmenu
mainmenu_option next_comment
-comment 'General setup'
+comment 'Base setup'
bool 'Fast IRQ handling' CONFIG_FAST_IRQ
+bool 'Process warning machine checks' CONFIG_MACHCHK_WARNING
+bool 'Use chscs for Common I/O' CONFIG_CHSC
+
+tristate 'QDIO support' CONFIG_QDIO
+ if [ "$CONFIG_QDIO" != "n" ]; then
+ bool ' Performance statistics in /proc' CONFIG_QDIO_PERF_STATS
+ fi
+
bool 'Builtin IPL record support' CONFIG_IPL
if [ "$CONFIG_IPL" = "y" ]; then
choice 'IPL method generated into head.S' \
bool 'VM shared kernel support' CONFIG_SHARED_KERNEL
endmenu
+mainmenu_option next_comment
+comment 'SCSI support'
+
+tristate 'SCSI support' CONFIG_SCSI
+
+if [ "$CONFIG_SCSI" != "n" ]; then
+ source drivers/scsi/Config.in
+fi
+endmenu
source drivers/s390/Config.in
comment 'Kernel hacking'
#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
-if [ "$CONFIG_CTC" = "y" ]; then
- bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG
-fi
+#if [ "$CONFIG_CTC" = "y" ]; then
+# bool 'Remote GDB kernel debugging' CONFIG_REMOTE_DEBUG
+#fi
bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
endmenu
# CONFIG_ISA is not set
# CONFIG_EISA is not set
# CONFIG_MCA is not set
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
-CONFIG_GENERIC_BUST_SPINLOCK=n
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_GENERIC_BUST_SPINLOCK is not set
CONFIG_ARCH_S390=y
CONFIG_ARCH_S390X=y
CONFIG_EXPERIMENTAL=y
#
-# Processor type and features
+# General setup
#
-CONFIG_SMP=y
-CONFIG_S390_SUPPORT=y
-CONFIG_BINFMT_ELF32=y
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
#
# Loadable module support
CONFIG_KMOD=y
#
-# General setup
+# Processor type and features
+#
+CONFIG_SMP=y
+CONFIG_S390_SUPPORT=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Base setup
#
CONFIG_FAST_IRQ=y
+CONFIG_MACHCHK_WARNING=y
+CONFIG_CHSC=y
+CONFIG_QDIO=m
+# CONFIG_QDIO_PERF_STATS is not set
CONFIG_IPL=y
# CONFIG_IPL_TAPE is not set
CONFIG_IPL_VM=y
-CONFIG_NET=y
-CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
CONFIG_KCORE_ELF=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
+#
+# SCSI support
+#
+CONFIG_SCSI=m
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_SR_EXTRA_DEVS=10
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_REPORT_LUNS is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_SCSI_PCMCIA is not set
+
#
# Block device drivers
#
#
# S/390 tape hardware support
#
-CONFIG_S390_TAPE_3490=y
-CONFIG_S390_TAPE_3480=y
+CONFIG_S390_TAPE_3490=m
+CONFIG_S390_TAPE_3480=m
#
# Network device drivers
#
CONFIG_CHANDEV=y
CONFIG_HOTPLUG=y
+CONFIG_LCS=m
CONFIG_CTC=m
CONFIG_IUCV=m
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
-CONFIG_NETLINK=y
-# CONFIG_RTNETLINK is not set
# CONFIG_NETLINK_DEV is not set
# CONFIG_NETFILTER is not set
# CONFIG_FILTER is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
CONFIG_IPV6=m
-# CONFIG_IPV6_NETLINK is not set
# CONFIG_KHTTPD is not set
# CONFIG_ATM is not set
+# CONFIG_VLAN_8021Q is not set
#
#
#
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
+
+#
+# Appletalk devices
+#
+# CONFIG_DEV_APPLETALK is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_FASTROUTE is not set
+CONFIG_NET_FASTROUTE=y
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# File systems
#
# CONFIG_QUOTA is not set
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
# CONFIG_ADFS_FS is not set
# CONFIG_ADFS_FS_RW is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_BFS_FS is not set
-# CONFIG_CMS_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
+CONFIG_EXT3_FS=y
+CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
# CONFIG_FAT_FS is not set
# CONFIG_MSDOS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_TMPFS is not set
-# CONFIG_RAMFS is not set
+CONFIG_RAMFS=y
# CONFIG_ISO9660_FS is not set
# CONFIG_JOLIET is not set
# CONFIG_ZISOFS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
# CONFIG_MINIX_FS is not set
-# CONFIG_FREEVXFS_FS is not set
+# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS_DEBUG is not set
# CONFIG_HPFS_FS is not set
# CONFIG_ROOT_NFS is not set
# CONFIG_NFSD is not set
# CONFIG_NFSD_V3 is not set
+# CONFIG_NFSD_TCP is not set
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
+# CONFIG_EXPORTFS is not set
# CONFIG_SMB_FS is not set
# CONFIG_NCP_FS is not set
# CONFIG_NCPFS_PACKET_SIGNING is not set
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
# CONFIG_ZISOFS_FS is not set
-# CONFIG_ZLIB_FS_INFLATE is not set
#
# Partition Types
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
# CONFIG_SMB_NLS is not set
# CONFIG_NLS is not set
# Kernel hacking
#
CONFIG_MAGIC_SYSRQ=y
+
+#
+# Library routines
+#
+# CONFIG_CRC32 is not set
+# CONFIG_ZLIB_INFLATE is not set
+# CONFIG_ZLIB_DEFLATE is not set
# Makefile for the linux kernel.
#
-O_TARGET := kernel.o
EXTRA_TARGETS := head.o init_task.o
+EXTRA_AFLAGS := -traditional
+
+O_TARGET := kernel.o
export-objs := debug.o ebcdic.o irq.o s390_ext.o smp.o s390_ksyms.o \
exec32.o
#
obj-$(CONFIG_REMOTE_DEBUG) += gdb-stub.o #gdb-low.o
-obj-$(CONFIG_S390_SUPPORT) += linux32.o signal32.o ioctl32.o wrapper32.o exec32.o
+obj-$(CONFIG_S390_SUPPORT) += linux32.o signal32.o ioctl32.o wrapper32.o \
+ exec32.o exec_domain32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
-EXTRA_AFLAGS := -traditional
-
include $(TOPDIR)/Rules.make
#
#
binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
+.PHONY: asm-offsets.h
+
+entry.S: asm-offsets.h
+
+#
+# Automatic offset generation for assembler files.
+#
+asm-offsets.h: asm-offsets.c
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -S $< -o - | grep -- "->" | \
+ (echo "#ifndef __ASM_OFFSETS_H__"; \
+ echo "#define __ASM_OFFSETS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY"; \
+ echo " *"; \
+ echo " * This file was generated by arch/s390/kernel/Makefile"; \
+ echo " */"; \
+ sed -e "s:^->\([^ ]*\) \([^ ]*\) \(.*\):#define \\1 \\2 /* \\3*/:" \
+ -e "s:->::"; \
+ echo "#endif" \
+ ) > asm-offsets.h
+
+clean:
+ rm -f asm-offsets.h
+
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+
+/* Use marker if you need to separate the values later */
+
+#define DEFINE(sym, val, marker) \
+ asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),);
+ DEFINE(__THREAD_ar2, offsetof(struct task_struct, thread.ar2),);
+ DEFINE(__THREAD_ar4, offsetof(struct task_struct, thread.ar4),);
+ DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
+ DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
+ BLANK();
+ DEFINE(__TI_task, offsetof(struct thread_info, task),);
+ DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
+ DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
+ DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
+ DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
+ return 0;
+}
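For illustration: each DEFINE() above makes the compiler emit a line of the form "->SYMBOL value expression" into its assembler output, and the grep/sed pipeline in the Makefile rule rewrites those lines into #defines (BLANK() becomes an empty line). Assuming the offsets shown here, which are hypothetical and depend on the actual structure layouts, the generated asm-offsets.h would look roughly like this:

    #ifndef __ASM_OFFSETS_H__
    #define __ASM_OFFSETS_H__
    /*
     * DO NOT MODIFY
     *
     * This file was generated by arch/s390/kernel/Makefile
     */
    #define __THREAD_info 24 /* offsetof(struct task_struct, thread_info) */
    #define __THREAD_ksp 72 /* offsetof(struct task_struct, thread.ksp) */

    #define __TI_flags 16 /* offsetof(struct thread_info, flags) */
    #endif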
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+#define SET_PERSONALITY(ex, ibcs2) \
+do { \
+ if (ibcs2) \
+ set_personality(PER_SVR4); \
+ else if (current->personality != PER_LINUX32) \
+ set_personality(PER_LINUX); \
+} while (0)
#endif
#include "linux32.h"
#include <linux/module.h>
#include <linux/config.h>
#include <linux/elfcore.h>
+#include <linux/binfmts.h>
int setup_arg_pages32(struct linux_binprm *bprm);
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
+ .globl _sb_findmap
+_sb_findmap:
+ .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+
size_t user_len, loff_t * offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
-static struct proc_dir_entry
-*debug_create_proc_dir_entry(struct proc_dir_entry *root,
- const char *name, mode_t mode,
- struct file_operations *fops);
-static void debug_delete_proc_dir_entry(struct proc_dir_entry *root,
- struct proc_dir_entry *entry);
static debug_info_t* debug_info_create(char *name, int page_order, int nr_areas, int buf_size);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
static debug_info_t *debug_area_first = NULL;
static debug_info_t *debug_area_last = NULL;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98))
-static struct semaphore debug_lock = MUTEX;
-#else
DECLARE_MUTEX(debug_lock);
-#endif
static int initialized = 0;
strncpy(rc->name, name, MIN(strlen(name), (DEBUG_MAX_PROCF_LEN - 1)));
rc->name[MIN(strlen(name), (DEBUG_MAX_PROCF_LEN - 1))] = 0;
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
+#ifdef CONFIG_PROC_FS
memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS *
sizeof(struct proc_dir_entry*));
+#endif /* CONFIG_PROC_FS */
atomic_set(&(rc->ref_count), 0);
return rc;
	/* create proc root directory */
-
rc->proc_root_entry = proc_mkdir(rc->name, debug_proc_root_entry);
- /* append new element to linked list */
- if(debug_area_first == NULL){
+ /* append new element to linked list */
+ if (debug_area_first == NULL) {
/* first element in list */
debug_area_first = rc;
rc->prev = NULL;
- }
- else{
+ } else {
/* append element to end of list */
debug_area_last->next = rc;
rc->prev = debug_area_last;
if (!db_info)
return;
if (atomic_dec_and_test(&db_info->ref_count)) {
+#ifdef DEBUG
printk(KERN_INFO "debug: freeing debug area %p (%s)\n",
db_info, db_info->name);
+#endif
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
- if (db_info->views[i] != NULL)
- debug_delete_proc_dir_entry
- (db_info->proc_root_entry,
- db_info->proc_entries[i]);
+ if (db_info->views[i] == NULL)
+ continue;
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(db_info->proc_entries[i]->name,
+ db_info->proc_root_entry);
+#endif
}
- debug_delete_proc_dir_entry(debug_proc_root_entry,
- db_info->proc_root_entry);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(db_info->proc_root_entry->name,
+ debug_proc_root_entry);
+#endif
if(db_info == debug_area_first)
debug_area_first = db_info->next;
if(db_info == debug_area_last)
debug_info_snapshot = debug_info_copy(debug_info);
if(!debug_info_snapshot){
+#ifdef DEBUG
printk(KERN_ERR "debug_open: debug_info_copy failed (out of mem)\n");
+#endif
rc = -ENOMEM;
goto out;
}
if ((file->private_data =
kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) {
+#ifdef DEBUG
printk(KERN_ERR "debug_open: kmalloc failed\n");
+#endif
debug_info_free(debug_info_snapshot);
rc = -ENOMEM;
goto out;
return 0; /* success */
}
-/*
- * debug_create_proc_dir_entry:
- * - initializes proc-dir-entry and registers it
- */
-
-static struct proc_dir_entry *debug_create_proc_dir_entry
- (struct proc_dir_entry *root, const char *name, mode_t mode,
- struct file_operations *fops)
-{
- struct proc_dir_entry *rc = create_proc_entry(name, mode, root);
- if (rc && fops)
- rc->proc_fops = fops;
- return rc;
-}
-
-
-/*
- * delete_proc_dir_entry:
- */
-
-static void debug_delete_proc_dir_entry
- (struct proc_dir_entry *root, struct proc_dir_entry *proc_entry)
-{
- remove_proc_entry(proc_entry->name, root);
-}
-
/*
* debug_register:
* - creates and initializes debug area for the caller
goto out;
debug_register_view(rc, &debug_level_view);
debug_register_view(rc, &debug_flush_view);
+#ifdef DEBUG
printk(KERN_INFO
"debug: reserved %d areas of %d pages for debugging %s\n",
nr_areas, 1 << page_order, rc->name);
+#endif
out:
if (rc == NULL){
printk(KERN_ERR "debug: debug_register failed for %s\n",name);
if (!id)
goto out;
down(&debug_lock);
+#ifdef DEBUG
printk(KERN_INFO "debug: unregistering %s\n", id->name);
+#endif
debug_info_put(id);
up(&debug_lock);
down(&debug_lock);
if (!initialized) {
+#ifdef CONFIG_PROC_FS
debug_proc_root_entry = proc_mkdir(DEBUG_DIR_ROOT, NULL);
+#endif /* CONFIG_PROC_FS */
printk(KERN_INFO "debug: Initialization complete\n");
initialized = 1;
}
mode |= S_IRUSR;
if (view->input_proc)
mode |= S_IWUSR;
- id->proc_entries[i] =
- debug_create_proc_dir_entry(id->proc_root_entry,
- view->name, mode,
- &debug_file_ops);
+ id->proc_entries[i] = create_proc_entry(view->name, mode,
+ id->proc_root_entry);
+ if (id->proc_entries[i] != NULL)
+ id->proc_entries[i]->proc_fops = &debug_file_ops;
rc = 0;
}
spin_unlock_irqrestore(&id->lock, flags);
if (i == DEBUG_MAX_VIEWS)
rc = -1;
else {
- debug_delete_proc_dir_entry(id->proc_root_entry,
- id->proc_entries[i]);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(id->proc_entries[i]->name,
+ id->proc_root_entry);
+#endif
id->views[i] = NULL;
rc = 0;
}
#ifdef DEBUG
printk("debug_cleanup_module: \n");
#endif
- debug_delete_proc_dir_entry(&proc_root, debug_proc_root_entry);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(debug_proc_root_entry->name, NULL);
+#endif /* CONFIG_PROC_FS */
return;
}
#include <asm/errno.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include "asm-offsets.h"
/*
* Stack layout for the system_call stack entry.
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + PT_ORIGGPR2
/* Now the additional entries */
SP_TRAP = (SP_ORIG_R2+GPR_SIZE)
-#if CONFIG_REMOTE_DEBUG
-SP_CRREGS = (SP_TRAP+4)
-/* fpu registers are saved & restored by the gdb stub itself */
-SP_FPC = (SP_CRREGS+(NUM_CRS*CR_SIZE))
-SP_FPRS = (SP_FPC+FPC_SIZE+FPC_PAD_SIZE)
-SP_PGM_OLD_ILC= (SP_FPRS+(NUM_FPRS*FPR_SIZE))
-#else
-SP_PGM_OLD_ILC= (SP_TRAP+4)
-#endif
-SP_SIZE = (SP_PGM_OLD_ILC+4)
-/*
- * these defines are offsets into the thread_struct
- */
-_TSS_PTREGS = 0
-_TSS_FPRS = (_TSS_PTREGS+8)
-_TSS_AR2 = (_TSS_FPRS+136)
-_TSS_AR4 = (_TSS_AR2+4)
-_TSS_KSP = (_TSS_AR4+4)
-_TSS_USERSEG = (_TSS_KSP+8)
-_TSS_PROT = (_TSS_USERSEG+8)
-_TSS_ERROR = (_TSS_PROT+8)
-_TSS_TRAP = (_TSS_ERROR+4)
-_TSS_PER = (_TSS_TRAP+4)
-_TSS_IEEE = (_TSS_PER+72)
-_TSS_FLAGS = (_TSS_IEEE+8)
+SP_SIZE = (SP_TRAP+4)
-/*
- * these are offsets into the task-struct.
- */
-state = 0
-flags = 8
-#error sigpending = 16
-#error need_resched = 32
-#error tsk_ptrace = 40
-processor = 92
+_TIF_WORK_MASK = (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
/*
* Register usage in interrupt handlers:
lpswe __LC_RETURN_PSW # back to caller
.endm
- .macro GET_CURRENT
+ .macro GET_THREAD_INFO
	lg	%r9,__LC_KERNEL_STACK    # load pointer to thread_info to %r9
aghi %r9,-16384
.endm
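In C terms, GET_THREAD_INFO recovers the thread_info structure that now lives at the base of the 16 KB kernel stack (the task_struct itself no longer sits on the stack). A minimal sketch of the same computation, assuming __LC_KERNEL_STACK holds the upper end of the current 16 KB stack:

    /* sketch only: hypothetical C equivalent of the GET_THREAD_INFO macro */
    static inline struct thread_info *sketch_thread_info(unsigned long stack_top)
    {
            /* stack_top - 16384 is the stack base, where thread_info resides */
            return (struct thread_info *) (stack_top - 16384);
    }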
/*
* Scheduler resume function, called by switch_to
- * grp2 = (thread_struct *) prev->tss
- * grp3 = (thread_struct *) next->tss
+ * gpr2 = (task_struct *) prev
+ * gpr3 = (task_struct *) next
* Returns:
* gpr2 = prev
*/
.globl resume
resume:
- lg %r4,_TSS_PTREGS(%r3)
- tm SP_PSW-SP_PTREGS(%r4),0x40 # is the new process using per ?
- jz resume_noper # if not we're fine
+ tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
+ jz resume_noper # if not we're fine
stctg %c9,%c11,48(%r15) # We are using per stuff
- clc _TSS_PER(24,%r3),48(%r15)
+ clc __THREAD_per(24,%r3),48(%r15)
je resume_noper # we got away without bashing TLB's
- lctlg %c9,%c11,_TSS_PER(%r3) # Nope we didn't
+ lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
resume_noper:
stmg %r6,%r15,48(%r15) # store resume registers of prev task
- stg %r15,_TSS_KSP(%r2) # store kernel stack ptr to prev->tss.ksp
- lghi %r0,-16384
- ngr %r0,%r15
- lg %r15,_TSS_KSP(%r3) # load kernel stack ptr from next->tss.ksp
- lghi %r1,16383
- ogr %r1,%r15
- aghi %r1,1
- stg %r1,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
- stam %a2,%a2,_TSS_AR2(%r2) # store kernel access reg. 2
- stam %a4,%a4,_TSS_AR4(%r2) # store kernel access reg. 4
- lam %a2,%a2,_TSS_AR2(%r3) # load kernel access reg. 2
- lam %a4,%a4,_TSS_AR4(%r3) # load kernel access reg. 4
- lgr %r2,%r0 # return task_struct of last task
+ stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
+ lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
+ stam %a2,%a2,__THREAD_ar2(%r2) # store kernel access reg. 2
+ stam %a4,%a4,__THREAD_ar4(%r2) # store kernel access reg. 4
+ lam %a2,%a2,__THREAD_ar2(%r3) # load kernel access reg. 2
+ lam %a4,%a4,__THREAD_ar4(%r3) # load kernel access reg. 4
lmg %r6,%r15,48(%r15) # load resume registers of next task
- br %r14
+ lg %r3,__THREAD_info(%r3) # load thread_info from task struct
+ aghi %r3,16384
+ stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
+ br %r14
/*
* do_softirq calling function. We want to run the softirq functions on the
.globl system_call
system_call:
SAVE_ALL __LC_SVC_OLD_PSW,1
- mvi SP_PGM_OLD_ILC(%r15),1 # mark PGM_OLD_ILC as invalid
-pgm_system_call:
- GET_CURRENT # load pointer to task_struct to R9
- larl %r7,sys_call_table
- llgc %r8,__LC_SVC_INT_CODE+1 # get svc number from lowcore
+ llgh %r8,__LC_SVC_INT_CODE # get svc number from lowcore
+	GET_THREAD_INFO		  # load pointer to thread_info to R9
stosm 48(%r15),0x03 # reenable interrupts
+ larl %r7,sys_call_table
sll %r8,3
tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ?
jo sysc_noemu
- la %r8,4(%r8) # use 31 bit emulation system calls
+ la %r7,4(%r7) # use 31 bit emulation system calls
sysc_noemu:
lgf %r8,0(%r8,%r7) # load address of system call routine
- tm tsk_ptrace+7(%r9),0x02 # PT_TRACESYS
- jnz sysc_tracesys
+ tm __TI_flags+7(%r9),_TIF_SYSCALL_TRACE
+ jo sysc_tracesys
basr %r14,%r8 # call sys_xxxx
stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
# ATTENTION: check sys_execve_glue before
# changing anything here !!
sysc_return:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- jno sysc_leave # no-> skip bottom half, resched & signal
-#
-# check, if reschedule is needed
-#
-#error lg %r0,need_resched(%r9) # get need_resched from task_struct
- ltgr %r0,%r0
- jnz sysc_reschedule
-#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
- jnz sysc_signal_return
+ stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
+ tm __TI_flags+7(%r9),_TIF_WORK_MASK
+ jnz sysc_work # there is work to do (signals etc.)
sysc_leave:
- tm SP_PGM_OLD_ILC(%r15),0xff
- jz pgm_svcret
- stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
RESTORE_ALL 1
+#
+# One of the work bits _TIF_NOTIFY_RESUME, _TIF_SIGPENDING or
+# _TIF_NEED_RESCHED is on. Find out which one.
+#
+sysc_work:
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
+ jno sysc_leave # no-> skip resched & signal
+ tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
+ jo sysc_reschedule
+ # add a test for TIF_NOTIFY_RESUME here when it is used.
+ # _TIF_SIGPENDING is the only flag left
+
#
# call do_signal before return
#
sysc_signal_return:
la %r2,SP_PTREGS(%r15) # load pt_regs
sgr %r3,%r3 # clear *oldset
- larl %r14,sysc_leave
- jg do_signal # return point is sysc_leave
+ larl %r14,sysc_return
+ jg do_signal # return point is sysc_return
#
-# call trace before and after sys_call
+# call schedule with sysc_return as return-address
+#
+sysc_reschedule:
+ larl %r14,sysc_return
+ jg schedule # return point is sysc_return
+
+#
+# call syscall_trace before and after system call
+# special linkage: %r12 contains the return address for trace_svc
#
sysc_tracesys:
- lghi %r2,-ENOSYS
- stg %r2,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
-#error brasl %r14,syscall_trace
+ lghi %r0,-ENOSYS
+ stg %r0,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
+ brasl %r14,syscall_trace
+ larl %r6,.Lc256
+ clc SP_R2(8,%r15),0(%r6)
+ jnl sysc_tracego
lg %r2,SP_R2(%r15)
- cghi %r2,-ENOSYS
- je sysc_tracesys_dn1
- sllg %r2,%r2,56 # strace wants to change the syscall
- srlg %r2,%r2,53 # zap unused bits & multiply by 8
- tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ?
- jo sysc_tracesys_noemu
- la %r2,4(%r2) # use 31 bit emulation system calls
-sysc_tracesys_noemu:
- lgf %r8,0(%r2,%r7) # load address of system call routine
-sysc_tracesys_dn1:
+ sllg %r2,%r2,3 # strace wants to change the syscall
+ lgf %r8,0(%r2,%r7)
+sysc_tracego:
lmg %r3,%r6,SP_R3(%r15)
lg %r2,SP_ORIG_R2(%r15)
basr %r14,%r8 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value
- larl %r14,sysc_return
-#error jg syscall_trace # return point is sysc_return
-
-#
-# call schedule with sysc_return as return-address
-#
-sysc_reschedule:
- larl %r14,sysc_return
- jg schedule # return point is sysc_return
+ tm __TI_flags+7(%r9),_TIF_SYSCALL_TRACE
+ jno sysc_return
+ larl %r14,sysc_return # return point is sysc_return
+ jg syscall_trace
#
# a new process exits the kernel with ret_from_fork
#
.globl ret_from_fork
ret_from_fork:
- GET_CURRENT # load pointer to task_struct to R9
- stosm 48(%r15),0x03 # reenable interrupts
+	GET_THREAD_INFO		  # load pointer to thread_info to R9
xc SP_R2(8,%r15),SP_R2(%r15) # child returns 0
-#if CONFIG_SMP || CONFIG_PREEMPT
+#ifdef CONFIG_SMP
larl %r14,sysc_return
jg schedule_tail # return to sysc_return
#else
- j sysc_return
+ j sysc_return
#endif
#
.long SYSCALL(sys_oldumount,sys32_oldumount_wrapper)
.long SYSCALL(sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/
.long SYSCALL(sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/
- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 25 old stime syscall */
+ .long SYSCALL(sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */
.long SYSCALL(sys_ptrace,sys32_ptrace_wrapper)
.long SYSCALL(sys_alarm,sys32_alarm_wrapper)
.long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old fstat syscall */
.long SYSCALL(sys_sigreturn_glue,sys32_sigreturn_glue)
.long SYSCALL(sys_clone_glue,sys_clone_glue) /* 120 */
.long SYSCALL(sys_setdomainname,sys32_setdomainname_wrapper)
- .long SYSCALL(sys_newuname,sys32_newuname_wrapper)
+ .long SYSCALL(s390x_newuname,sys32_newuname_wrapper)
.long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* modify_ldt for i386 */
.long SYSCALL(sys_adjtimex,sys32_adjtimex_wrapper)
.long SYSCALL(sys_mprotect,sys32_mprotect_wrapper) /* 125 */
.long SYSCALL(sys_fchdir,sys32_fchdir_wrapper)
.long SYSCALL(sys_bdflush,sys32_bdflush_wrapper)
.long SYSCALL(sys_sysfs,sys32_sysfs_wrapper) /* 135 */
- .long SYSCALL(sys_personality,sys32_personality_wrapper)
+ .long SYSCALL(s390x_personality,sys32_personality_wrapper)
.long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* for afs_syscall */
.long SYSCALL(sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */
.long SYSCALL(sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */
.long SYSCALL(sys_writev,sys32_writev_wrapper)
.long SYSCALL(sys_getsid,sys32_getsid_wrapper)
.long SYSCALL(sys_fdatasync,sys32_fdatasync_wrapper)
- .long SYSCALL(sys_sysctl,sys_ni_syscall)
+ .long SYSCALL(sys_sysctl,sys32_sysctl_wrapper)
.long SYSCALL(sys_mlock,sys32_mlock_wrapper) /* 150 */
.long SYSCALL(sys_munlock,sys32_munlock_wrapper)
.long SYSCALL(sys_mlockall,sys32_mlockall_wrapper)
.long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* streams1 */
.long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* streams2 */
.long SYSCALL(sys_vfork_glue,sys_vfork_glue) /* 190 */
- .long SYSCALL(sys_getrlimit,sys32_old_getrlimit_wrapper)
+ .long SYSCALL(sys_getrlimit,sys32_getrlimit_wrapper)
.long SYSCALL(sys_mmap2,sys32_mmap2_wrapper)
.long SYSCALL(sys_ni_syscall,sys32_truncate64_wrapper)
.long SYSCALL(sys_ni_syscall,sys32_ftruncate64_wrapper)
.long SYSCALL(sys_madvise,sys32_madvise_wrapper)
.long SYSCALL(sys_getdents64,sys32_getdents64_wrapper)/* 220 */
.long SYSCALL(sys_ni_syscall,sys32_fcntl64_wrapper)
- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 222 - reserved for posix_acl */
- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 223 - reserved for posix_acl */
- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 224 - reserved for posix_acl */
- .rept 255-224
- .long SYSCALL(sys_ni_syscall,sys_ni_syscall)
+ .long SYSCALL(sys_readahead,sys32_readahead)
+ .long SYSCALL(sys_ni_syscall,sys32_sendfile64)
+ .long SYSCALL(sys_setxattr,sys32_setxattr_wrapper)
+ .long SYSCALL(sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
+ .long SYSCALL(sys_fsetxattr,sys32_fsetxattr_wrapper)
+ .long SYSCALL(sys_getxattr,sys32_getxattr_wrapper)
+ .long SYSCALL(sys_lgetxattr,sys32_lgetxattr_wrapper)
+ .long SYSCALL(sys_fgetxattr,sys32_fgetxattr_wrapper)
+ .long SYSCALL(sys_listxattr,sys32_listxattr_wrapper) /* 230 */
+ .long SYSCALL(sys_llistxattr,sys32_llistxattr_wrapper)
+ .long SYSCALL(sys_flistxattr,sys32_flistxattr_wrapper)
+ .long SYSCALL(sys_removexattr,sys32_removexattr_wrapper)
+ .long SYSCALL(sys_lremovexattr,sys32_lremovexattr_wrapper)
+ .long SYSCALL(sys_fremovexattr,sys32_fremovexattr_wrapper)
.long SYSCALL(sys_gettid,sys_gettid)
.long SYSCALL(sys_tkill,sys_tkill)
+ .long SYSCALL(sys_futex,sys32_futex_wrapper)
+ .long SYSCALL(sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
+ .long SYSCALL(sys_sched_getaffinity,sys32_sched_getaffinity_wrapper)
+ .rept 255-240
+ .long SYSCALL(sys_ni_syscall,sys_ni_syscall)
.endr
/*
* for LPSW?).
*/
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
- jz pgm_sv # skip if not
+ jnz pgm_per # got per exception -> special case
+ SAVE_ALL __LC_PGM_OLD_PSW,1
+ llgh %r8,__LC_PGM_INT_CODE
+ sll %r8,3
+ GET_THREAD_INFO
+ larl %r1,pgm_check_table
+ lg %r1,0(%r8,%r1) # load address of handler routine
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ lgf %r3,__LC_PGM_ILC # load program interruption code
+ larl %r14,sysc_return
+ br %r1 # branch to interrupt-handler
+
+#
+# handle per exception
+#
+pgm_per:
tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
- jnz pgm_sv # skip if it is
+ jnz pgm_per_std # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
lpswe __LC_PGM_OLD_PSW
+
+#
+# Normal per exception
+#
+pgm_per_std:
+ SAVE_ALL __LC_PGM_OLD_PSW,1
+ GET_THREAD_INFO
+ lghi %r4,0x7f
+ lgf %r3,__LC_PGM_ILC # load program interruption code
+ nr %r4,%r3 # clear per-event-bit and ilc
+	je	pgm_per_only		 # only per, no program check ?
+ sll %r4,3
+ larl %r1,pgm_check_table
+ lg %r1,0(%r4,%r1) # load address of handler routine
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ basr %r14,%r1 # branch to interrupt-handler
+pgm_per_only:
+	la	%r2,SP_PTREGS(%r15)	 # address of register-save area
+ larl %r14,sysc_return # load adr. of system return
+ jg handle_per_exception
+
+#
# it was a single stepped SVC that is causing all the trouble
+#
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,1
- mvc SP_PGM_OLD_ILC(4,%r15),__LC_PGM_ILC # save program check information
- j pgm_system_call # now do the svc
+ llgh %r8,__LC_SVC_INT_CODE # get svc number from lowcore
+	GET_THREAD_INFO		 # load pointer to thread_info to R9
+ stosm 48(%r15),0x03 # reenable interrupts
+ larl %r7,sys_call_table
+ sll %r8,3
+ tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ?
+ jo pgm_svcper_noemu
+	la	%r7,4(%r7)		 # use 31 bit emulation system calls
+pgm_svcper_noemu:
+ lgf %r8,0(%r8,%r7) # load address of system call routine
+	tm	__TI_flags+7(%r9),_TIF_SYSCALL_TRACE
+ jo pgm_tracesys
+ basr %r14,%r8 # call sys_xxxx
+ stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
+ # ATTENTION: check sys_execve_glue before
+ # changing anything here !!
+
pgm_svcret:
+	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
+	jno	pgm_svcper_nosig
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ sgr %r3,%r3 # clear *oldset
+ brasl %r14,do_signal
+
+pgm_svcper_nosig:
lhi %r0,__LC_PGM_OLD_PSW # set trap indication back to pgm_chk
st %r0,SP_TRAP(%r15)
- llgh %r7,SP_PGM_OLD_ILC(%r15) # get ilc from stack
- mvi SP_PGM_OLD_ILC(%r15),1 # mark PGM_OLD_ILC as invalid
- j pgm_no_sv
-pgm_sv:
- SAVE_ALL __LC_PGM_OLD_PSW,1
- mvi SP_PGM_OLD_ILC(%r15),1 # mark PGM_OLD_ILC as invalid
- llgh %r7,__LC_PGM_ILC # load instruction length
- GET_CURRENT
-pgm_no_sv:
- llgh %r8,__LC_PGM_INT_CODE # N.B. saved int code used later KEEP it
- lghi %r3,0x7f
- nr %r3,%r8 # clear per-event-bit & move to r3
- je pgm_dn # none of Martins exceptions occurred bypass
- sll %r3,3
- larl %r1,pgm_check_table
- lg %r1,0(%r3,%r1) # load address of handler routine
- srl %r3,3
- la %r2,SP_PTREGS(%r15) # address of register-save area
- chi %r3,0x4 # protection-exception ?
- jne pgm_go # if not,
- lg %r5,SP_PSW+8(15) # load psw addr
- slgr %r5,%r7 # substract ilc from psw
- stg %r5,SP_PSW+8(15) # store corrected psw addr
-pgm_go: basr %r14,%r1 # branch to interrupt-handler
-pgm_dn: nill %r8,0x80 # check for per exception
- je sysc_return
	la	%r2,SP_PTREGS(%r15)	 # address of register-save area
larl %r14,sysc_return # load adr. of system return
jg handle_per_exception
+#
+# call trace before and after sys_call
+#
+pgm_tracesys:
+ lghi %r0,-ENOSYS
+ stg %r0,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
+ brasl %r14,syscall_trace
+ lg %r2,SP_R2(%r15)
+ cghi %r2,256
+ jnl pgm_svc_go
+ sllg %r2,%r2,3 # strace wants to change the syscall
+ lgf %r8,0(%r2,%r7)
+pgm_svc_go:
+ lmg %r3,%r6,SP_R3(%r15)
+ lg %r2,SP_ORIG_R2(%r15)
+ basr %r14,%r8 # call sys_xxx
+ stg %r2,SP_R2(%r15) # store return value
+ tm __TI_flags+7(%r9),_TIF_SYSCALL_TRACE
+ jno pgm_svcret
+	larl	%r14,pgm_svcret		 # return point is pgm_svcret
+ jg syscall_trace
/*
* IO interrupt handler routine
.globl io_int_handler
io_int_handler:
SAVE_ALL __LC_IO_OLD_PSW,0
- GET_CURRENT # load pointer to task_struct to R9
+	GET_THREAD_INFO		  # load pointer to thread_info to R9
la %r2,SP_PTREGS(%r15) # address of register-save area
llgh %r3,__LC_SUBCHANNEL_NR # load subchannel number
	llgf	%r4,__LC_IO_INT_PARM	 # load interruption parm
#
# check, if bottom-half has to be done
#
- lgf %r1,processor(%r9) # get cpu number from task struture
+ lgf %r1,__TI_cpu(%r9)
larl %r2,irq_stat
sll %r1,L1_CACHE_SHIFT
la %r1,0(%r1,%r2)
- icm %r0,15,0(%r1) # test irq_stat[#cpu].__softirq_pending
+ icm %r0,15,0(%r1) # test irq_stat[#cpu].__softirq_pending
jnz io_handle_bottom_half
io_return_bh:
-
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- jno io_leave # no-> skip resched & signal
- stosm 48(%r15),0x03 # reenable interrupts
-#
-# check, if reschedule is needed
-#
-#error lg %r0,need_resched(%r9) # get need_resched from task_struct
- ltgr %r0,%r0
- jnz io_reschedule
-#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
- jnz io_signal_return
+ tm __TI_flags+7(%r9),_TIF_WORK_MASK
+ jnz io_work # there is work to do (signals etc.)
io_leave:
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
RESTORE_ALL 0
#
-# call do_softirq and return from syscall, if interrupt-level
-# is zero
+# call do_softirq
#
io_handle_bottom_half:
larl %r14,io_return_bh
- jg do_softirq # return point is io_return_bh
+ jg do_softirq # return point is io_return_bh
#
-# call schedule with io_return as return-address
+# One of the work bits _TIF_NOTIFY_RESUME, _TIF_SIGPENDING or
+# _TIF_NEED_RESCHED is on. Find out which one.
#
-io_reschedule:
- larl %r14,io_return
- jg schedule # call scheduler, return to io_return
+io_work:
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
+ jno io_leave # no-> skip resched & signal
+ stosm 48(%r15),0x03 # reenable interrupts
+ tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
+ jo io_reschedule
+ # add a test for TIF_NOTIFY_RESUME here when it is used.
+ # _TIF_SIGPENDING is the only flag left
#
# call do_signal before return
larl %r14,io_leave
jg do_signal # return point is io_leave
+#
+# call schedule with io_return as return-address
+#
+io_reschedule:
+ larl %r14,io_return
+ jg schedule # call scheduler, return to io_return
+
/*
* External interrupt handler routine
*/
.globl ext_int_handler
ext_int_handler:
SAVE_ALL __LC_EXT_OLD_PSW,0
- GET_CURRENT # load pointer to task_struct to R9
- la %r2,SP_PTREGS(%r15) # address of register-save area
- llgh %r3,__LC_EXT_INT_CODE # error code
- lgr %r1,%r3 # calculate index = code & 0xff
- nill %r1,0xff
- sll %r1,3
- larl %r4,ext_int_hash
- lg %r4,0(%r1,%r4) # get first list entry for hash value
- ltgr %r4,%r4 # == NULL ?
- jz io_return # yes, nothing to do, exit
+	GET_THREAD_INFO		 # load pointer to thread_info to R9
+ llgh %r6,__LC_EXT_INT_CODE # get interruption code
+ lgr %r1,%r6 # calculate index = code & 0xff
+ nill %r1,0xff
+ sll %r1,3
+ larl %r7,ext_int_hash
+ lg %r7,0(%r1,%r7) # get first list entry for hash value
+ ltgr %r7,%r7 # == NULL ?
+ jz io_return # yes, nothing to do, exit
ext_int_loop:
- ch %r3,16(%r4) # compare external interrupt code
- je ext_int_found
- lg %r4,0(%r4) # next list entry
- ltgr %r4,%r4
- jnz ext_int_loop
- j io_return
-ext_int_found:
- lg %r4,8(%r4) # get handler address
- larl %r14,io_return
- br %r4 # branch to ext call handler
+ ch %r6,16(%r7) # compare external interrupt code
+ jne ext_int_next
+ lg %r1,8(%r7) # get handler address
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ lgr %r3,%r6 # interruption code
+ basr %r14,%r1 # call handler
+ext_int_next:
+ lg %r7,0(%r7) # next list entry
+ ltgr %r7,%r7
+ jnz ext_int_loop
+ j io_return
/*
* Machine check handler routines
*/
.align 4
.Lc_ac: .long 0,0,1
+.Lc256: .quad 256
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
+#include <linux/binfmts.h>
#define __NO_VERSION__
#include <linux/module.h>
--- /dev/null
+/*
+ * Support for 32-bit Linux for S390 personality.
+ *
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+
+struct exec_domain s390_exec_domain;
+
+static int __init
+s390_init (void)
+{
+ s390_exec_domain.name = "Linux/s390";
+ s390_exec_domain.handler = NULL;
+ s390_exec_domain.pers_low = PER_LINUX32;
+ s390_exec_domain.pers_high = PER_LINUX32;
+ s390_exec_domain.signal_map = default_exec_domain.signal_map;
+ s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+ register_exec_domain(&s390_exec_domain);
+ return 0;
+}
+
+__initcall(s390_init);
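Together with the SET_PERSONALITY change above, this exec domain keeps 31-bit processes in PER_LINUX32 across exec. Whether it took effect can be checked from a 31-bit test program with the standard personality() query idiom (a userland sketch, not part of the patch):

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
            /* passing 0xffffffff queries the current personality unchanged */
            printf("personality: 0x%x\n", personality(0xffffffff));
            return 0;
    }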
bnz .Lrdcont
st %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it
.Lrdcont:
- l %r2,INITRD_START-PARMAREA(%r12)
+ l %r2,INITRD_START+4-PARMAREA(%r12)
clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
bz .Lagain2
clc 0(3,%r2),.L_eof
#
# Setup stack
#
- larl %r15,init_task_union
+ larl %r15,init_thread_union
	aghi	%r15,16384		# init_thread_union + 16384
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
/*
- * Initial task structure.
+ * Initial thread structure.
*
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled. This is done by making sure
- * the linker maps this in the .text segment right after head.S,
- * and making head.S ensure the proper alignment.
+ * We need to make sure that this is 16384-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
*
- * The things we do for performance..
+ * All other task structs will be allocated on slabs in fork.c
*/
-union task_union init_task_union __attribute__((aligned(16384))) =
- { INIT_TASK(init_task_union.task) };
+struct task_struct init_task = INIT_TASK(init_task);
+
#include <linux/route.h>
#include <linux/ext2_fs.h>
#include <linux/hdreg.h>
+#include <linux/if_bonding.h>
+#include <linux/loop.h>
+#include <linux/blkpg.h>
+#include <linux/blk.h>
+#include <linux/elevator.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/dasd.h>
+#include <asm/tape390.h>
#include <asm/sockios.h>
+#include <asm/ioctls.h>
#include "linux32.h"
return err;
}
+static int bond_ioctl(unsigned long fd, unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ mm_segment_t old_fs;
+ int err, len;
+ u32 data;
+
+ if (copy_from_user(&ifr, (struct ifreq32 *)arg, sizeof(struct ifreq32)))
+ return -EFAULT;
+ ifr.ifr_data = (__kernel_caddr_t)get_free_page(GFP_KERNEL);
+ if (!ifr.ifr_data)
+ return -EAGAIN;
+
+ switch (cmd) {
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ len = IFNAMSIZ * sizeof(char);
+ break;
+ case SIOCBONDSLAVEINFOQUERY:
+ len = sizeof(struct ifslave);
+ break;
+ case SIOCBONDINFOQUERY:
+ len = sizeof(struct ifbond);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ };
+
+ __get_user(data, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_data));
+ if (copy_from_user(ifr.ifr_data, (char *)A(data), len)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
+ set_fs (old_fs);
+ if (!err) {
+ len = copy_to_user((char *)A(data), ifr.ifr_data, len);
+ if (len)
+ err = -EFAULT;
+ }
+
+out:
+ free_page((unsigned long)ifr.ifr_data);
+ return err;
+}
+
static inline int dev_ifsioc(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
return sys_ioctl(fd, cmd, arg);
}
+
+struct loop_info32 {
+ int lo_number; /* ioctl r/o */
+ __kernel_dev_t32 lo_device; /* ioctl r/o */
+ unsigned int lo_inode; /* ioctl r/o */
+ __kernel_dev_t32 lo_rdevice; /* ioctl r/o */
+ int lo_offset;
+ int lo_encrypt_type;
+ int lo_encrypt_key_size; /* ioctl w/o */
+ int lo_flags; /* ioctl r/o */
+ char lo_name[LO_NAME_SIZE];
+ unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
+ unsigned int lo_init[2];
+ char reserved[4];
+};
+
+static int loop_status(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ struct loop_info l;
+ int err = -EINVAL;
+
+ switch(cmd) {
+ case LOOP_SET_STATUS:
+ err = get_user(l.lo_number, &((struct loop_info32 *)arg)->lo_number);
+ err |= __get_user(l.lo_device, &((struct loop_info32 *)arg)->lo_device);
+ err |= __get_user(l.lo_inode, &((struct loop_info32 *)arg)->lo_inode);
+ err |= __get_user(l.lo_rdevice, &((struct loop_info32 *)arg)->lo_rdevice);
+ err |= __copy_from_user((char *)&l.lo_offset, (char *)&((struct loop_info32 *)arg)->lo_offset,
+ 8 + (unsigned long)l.lo_init - (unsigned long)&l.lo_offset);
+ if (err) {
+ err = -EFAULT;
+ } else {
+ set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&l);
+ set_fs (old_fs);
+ }
+ break;
+ case LOOP_GET_STATUS:
+ set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&l);
+ set_fs (old_fs);
+ if (!err) {
+ err = put_user(l.lo_number, &((struct loop_info32 *)arg)->lo_number);
+ err |= __put_user(l.lo_device, &((struct loop_info32 *)arg)->lo_device);
+ err |= __put_user(l.lo_inode, &((struct loop_info32 *)arg)->lo_inode);
+ err |= __put_user(l.lo_rdevice, &((struct loop_info32 *)arg)->lo_rdevice);
+ err |= __copy_to_user((char *)&((struct loop_info32 *)arg)->lo_offset,
+ (char *)&l.lo_offset, (unsigned long)l.lo_init - (unsigned long)&l.lo_offset);
+ if (err)
+ err = -EFAULT;
+ }
+ break;
+ default: {
+ static int count = 0;
+ if (++count <= 20)
+ printk("%s: Unknown loop ioctl cmd, fd(%d) "
+ "cmd(%08x) arg(%08lx)\n",
+ __FUNCTION__, fd, cmd, arg);
+ }
+ }
+ return err;
+}
+
+
+struct blkpg_ioctl_arg32 {
+ int op;
+ int flags;
+ int datalen;
+ u32 data;
+};
+
+static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, struct blkpg_ioctl_arg32 *arg)
+{
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+ int err;
+ mm_segment_t old_fs = get_fs();
+
+ err = get_user(a.op, &arg->op);
+ err |= __get_user(a.flags, &arg->flags);
+ err |= __get_user(a.datalen, &arg->datalen);
+ err |= __get_user((long)a.data, &arg->data);
+ if (err) return err;
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+ case BLKPG_DEL_PARTITION:
+ if (a.datalen < sizeof(struct blkpg_partition))
+ return -EINVAL;
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+ a.data = &p;
+ set_fs (KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&a);
+		set_fs (old_fs);
+		break;
+	default:
+ return -EINVAL;
+ }
+ return err;
+}
+
+
+typedef struct ica_z90_status_t {
+ int totalcount;
+ int leedslitecount;
+ int leeds2count;
+ int requestqWaitCount;
+ int pendingqWaitCount;
+ int totalOpenCount;
+ int cryptoDomain;
+ unsigned char status[64];
+ unsigned char qdepth[64];
+} ica_z90_status;
+
+typedef struct _ica_rsa_modexpo {
+ char *inputdata;
+ unsigned int inputdatalength;
+ char *outputdata;
+ unsigned int outputdatalength;
+ char *b_key;
+ char *n_modulus;
+} ica_rsa_modexpo_t;
+
+typedef struct _ica_rsa_modexpo_32 {
+ u32 inputdata;
+ u32 inputdatalength;
+ u32 outputdata;
+ u32 outputdatalength;
+ u32 b_key;
+ u32 n_modulus;
+} ica_rsa_modexpo_32_t;
+
+typedef struct _ica_rsa_modexpo_crt {
+ char *inputdata;
+ unsigned int inputdatalength;
+ char *outputdata;
+ unsigned int outputdatalength;
+ char *bp_key;
+ char *bq_key;
+ char *np_prime;
+ char *nq_prime;
+ char *u_mult_inv;
+} ica_rsa_modexpo_crt_t;
+
+typedef struct _ica_rsa_modexpo_crt_32 {
+ u32 inputdata;
+ u32 inputdatalength;
+ u32 outputdata;
+ u32 outputdatalength;
+ u32 bp_key;
+ u32 bq_key;
+ u32 np_prime;
+ u32 nq_prime;
+ u32 u_mult_inv;
+} ica_rsa_modexpo_crt_32_t;
+
+#define ICA_IOCTL_MAGIC 'z'
+#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ICA_IOCTL_MAGIC, 0x05, 0)
+#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ICA_IOCTL_MAGIC, 0x06, 0)
+#define ICARSAMODMULT _IOC(_IOC_READ|_IOC_WRITE, ICA_IOCTL_MAGIC, 0x07, 0)
+#define ICAZ90STATUS _IOC(_IOC_READ, ICA_IOCTL_MAGIC, 0x10, sizeof(ica_z90_status))
+#define ICAZ90QUIESCE _IOC(_IOC_NONE, ICA_IOCTL_MAGIC, 0x11, 0)
+#define ICAZ90HARDRESET _IOC(_IOC_NONE, ICA_IOCTL_MAGIC, 0x12, 0)
+#define ICAZ90HARDERROR _IOC(_IOC_NONE, ICA_IOCTL_MAGIC, 0x13, 0)
+
+static int do_rsa_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ int err = 0;
+ ica_rsa_modexpo_t rsa;
+ ica_rsa_modexpo_32_t *rsa32 = (ica_rsa_modexpo_32_t *)arg;
+ u32 inputdata, outputdata, b_key, n_modulus;
+
+ memset (&rsa, 0, sizeof(rsa));
+
+ err |= __get_user (inputdata, &rsa32->inputdata);
+ err |= __get_user (rsa.inputdatalength, &rsa32->inputdatalength);
+ err |= __get_user (outputdata, &rsa32->outputdata);
+ err |= __get_user (rsa.outputdatalength, &rsa32->outputdatalength);
+ err |= __get_user (b_key, &rsa32->b_key);
+ err |= __get_user (n_modulus, &rsa32->n_modulus);
+ if (err)
+ return -EFAULT;
+
+ rsa.inputdata = (char *)kmalloc(rsa.inputdatalength, GFP_KERNEL);
+ if (!rsa.inputdata) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.inputdata, (char *)(u64)(inputdata & 0x7fffffff),
+ rsa.inputdatalength)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.outputdata = (char *)kmalloc(rsa.outputdatalength, GFP_KERNEL);
+ if (!rsa.outputdata) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ rsa.b_key = (char *)kmalloc(rsa.inputdatalength, GFP_KERNEL);
+ if (!rsa.b_key) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.b_key, (char *)(u64)(b_key & 0x7fffffff),
+ rsa.inputdatalength)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.n_modulus = (char *)kmalloc(rsa.inputdatalength, GFP_KERNEL);
+ if (!rsa.n_modulus) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.n_modulus, (char *)(u64)(n_modulus & 0x7fffffff),
+ rsa.inputdatalength)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&rsa);
+ set_fs(old_fs);
+ if (err < 0)
+ goto cleanup;
+
+ if (copy_to_user((char *)(u64)(outputdata & 0x7fffffff), rsa.outputdata,
+ rsa.outputdatalength))
+ err = -EFAULT;
+
+cleanup:
+ if (rsa.inputdata)
+ kfree(rsa.inputdata);
+ if (rsa.outputdata)
+ kfree(rsa.outputdata);
+ if (rsa.b_key)
+ kfree(rsa.b_key);
+ if (rsa.n_modulus)
+ kfree(rsa.n_modulus);
+
+ return err;
+}
+
+static int do_rsa_crt_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ int err = 0;
+ ica_rsa_modexpo_crt_t rsa;
+ ica_rsa_modexpo_crt_32_t *rsa32 = (ica_rsa_modexpo_crt_32_t *)arg;
+ u32 inputdata, outputdata, bp_key, bq_key, np_prime, nq_prime, u_mult_inv;
+
+ memset (&rsa, 0, sizeof(rsa));
+
+ err |= __get_user (inputdata, &rsa32->inputdata);
+ err |= __get_user (rsa.inputdatalength, &rsa32->inputdatalength);
+ err |= __get_user (outputdata, &rsa32->outputdata);
+ err |= __get_user (rsa.outputdatalength, &rsa32->outputdatalength);
+ err |= __get_user (bp_key, &rsa32->bp_key);
+ err |= __get_user (bq_key, &rsa32->bq_key);
+ err |= __get_user (np_prime, &rsa32->np_prime);
+ err |= __get_user (nq_prime, &rsa32->nq_prime);
+ err |= __get_user (u_mult_inv, &rsa32->u_mult_inv);
+ if (err)
+ return -EFAULT;
+
+ rsa.inputdata = (char *)kmalloc(rsa.inputdatalength, GFP_KERNEL);
+ if (!rsa.inputdata) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.inputdata, (char *)(u64)(inputdata & 0x7fffffff),
+ rsa.inputdatalength)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.outputdata = (char *)kmalloc(rsa.outputdatalength, GFP_KERNEL);
+ if (!rsa.outputdata) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ rsa.bp_key = (char *)kmalloc(rsa.inputdatalength/2 + 8, GFP_KERNEL);
+ if (!rsa.bp_key) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.bp_key, (char *)(u64)(bp_key & 0x7fffffff),
+ rsa.inputdatalength/2 + 8)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.bq_key = (char *)kmalloc(rsa.inputdatalength/2, GFP_KERNEL);
+ if (!rsa.bq_key) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.bq_key, (char *)(u64)(bq_key & 0x7fffffff),
+ rsa.inputdatalength/2)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.np_prime = (char *)kmalloc(rsa.inputdatalength/2 + 8, GFP_KERNEL);
+ if (!rsa.np_prime) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.np_prime, (char *)(u64)(np_prime & 0x7fffffff),
+ rsa.inputdatalength/2 + 8)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.nq_prime = (char *)kmalloc(rsa.inputdatalength/2, GFP_KERNEL);
+ if (!rsa.nq_prime) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.nq_prime, (char *)(u64)(nq_prime & 0x7fffffff),
+ rsa.inputdatalength/2)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ rsa.u_mult_inv = (char *)kmalloc(rsa.inputdatalength/2 + 8, GFP_KERNEL);
+ if (!rsa.u_mult_inv) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (copy_from_user(rsa.u_mult_inv, (char *)(u64)(u_mult_inv & 0x7fffffff),
+ rsa.inputdatalength/2 + 8)) {
+ err = -EFAULT;
+ goto cleanup;
+ }
+
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&rsa);
+ set_fs(old_fs);
+ if (err < 0)
+ goto cleanup;
+
+ if (copy_to_user((char *)(u64)(outputdata & 0x7fffffff), rsa.outputdata,
+ rsa.outputdatalength))
+ err = -EFAULT;
+
+cleanup:
+ if (rsa.inputdata)
+ kfree(rsa.inputdata);
+ if (rsa.outputdata)
+ kfree(rsa.outputdata);
+ if (rsa.bp_key)
+ kfree(rsa.bp_key);
+ if (rsa.bq_key)
+ kfree(rsa.bq_key);
+ if (rsa.np_prime)
+ kfree(rsa.np_prime);
+ if (rsa.nq_prime)
+ kfree(rsa.nq_prime);
+ if (rsa.u_mult_inv)
+ kfree(rsa.u_mult_inv);
+
+ return err;
+}
+
static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
IOCTL32_DEFAULT(BIODASDINFO),
IOCTL32_DEFAULT(BIODASDFMT),
+ IOCTL32_DEFAULT(TAPE390_DISPLAY),
+
+ IOCTL32_DEFAULT(BLKROSET),
+ IOCTL32_DEFAULT(BLKROGET),
IOCTL32_DEFAULT(BLKRRPART),
+ IOCTL32_DEFAULT(BLKFLSBUF),
+ IOCTL32_DEFAULT(BLKRASET),
+ IOCTL32_DEFAULT(BLKFRASET),
+ IOCTL32_DEFAULT(BLKSECTSET),
+ IOCTL32_DEFAULT(BLKSSZGET),
+ IOCTL32_DEFAULT(BLKBSZGET),
+ IOCTL32_DEFAULT(BLKGETSIZE64),
+
+ IOCTL32_DEFAULT(BLKELVGET),
+ IOCTL32_DEFAULT(BLKELVSET),
IOCTL32_HANDLER(HDIO_GETGEO, hd_geometry_ioctl),
IOCTL32_DEFAULT(SIOCGSTAMP),
+ IOCTL32_DEFAULT(LOOP_SET_FD),
+ IOCTL32_DEFAULT(LOOP_CLR_FD),
+
IOCTL32_HANDLER(SIOCGIFNAME, dev_ifname32),
IOCTL32_HANDLER(SIOCGIFCONF, dev_ifconf),
IOCTL32_HANDLER(SIOCGIFFLAGS, dev_ifsioc),
IOCTL32_HANDLER(EXT2_IOC32_GETVERSION, do_ext2_ioctl),
IOCTL32_HANDLER(EXT2_IOC32_SETVERSION, do_ext2_ioctl),
- IOCTL32_HANDLER(BLKGETSIZE, w_long)
+ IOCTL32_HANDLER(LOOP_SET_STATUS, loop_status),
+ IOCTL32_HANDLER(LOOP_GET_STATUS, loop_status),
+
+ IOCTL32_HANDLER(ICARSAMODEXPO, do_rsa_ioctl),
+ IOCTL32_HANDLER(ICARSACRT, do_rsa_crt_ioctl),
+ IOCTL32_HANDLER(ICARSAMODMULT, do_rsa_ioctl),
+ IOCTL32_DEFAULT(ICAZ90STATUS),
+ IOCTL32_DEFAULT(ICAZ90QUIESCE),
+ IOCTL32_DEFAULT(ICAZ90HARDRESET),
+ IOCTL32_DEFAULT(ICAZ90HARDERROR),
+
+ IOCTL32_HANDLER(BLKRAGET, w_long),
+ IOCTL32_HANDLER(BLKGETSIZE, w_long),
+ IOCTL32_HANDLER(BLKFRAGET, w_long),
+ IOCTL32_HANDLER(BLKSECTGET, w_long),
+ IOCTL32_HANDLER(BLKPG, blkpg_ioctl_trans)
};
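For reference, the IOCTL32_DEFAULT and IOCTL32_HANDLER entries above pair a 32-bit ioctl command with a conversion routine; IOCTL32_DEFAULT marks commands whose argument needs no translation and can be handed straight to sys_ioctl. A sketch of what such entry macros conventionally expand to (the struct and field names here are assumptions, not taken from the patch):

    /* sketch: one table slot maps an ioctl command to its 32-bit handler */
    struct ioctl32_entry {
            unsigned int cmd;
            int (*handler)(unsigned int, unsigned int, unsigned long);
    };
    #define IOCTL32_HANDLER(cmd, handler) \
            { (cmd), (int (*)(unsigned int, unsigned int, unsigned long)) (handler) }
    #define IOCTL32_DEFAULT(cmd)	IOCTL32_HANDLER((cmd), sys_ioctl)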
int show_interrupts(struct seq_file *p, void *v)
{
int i, j;
- struct irqaction * action;
seq_puts(p, " ");
if (ioinfo[i] == INVALID_STORAGE_AREA)
continue;
- action = ioinfo[i]->irq_desc.action;
-
- if (!action)
- continue;
-
seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for (j=0; j<smp_num_cpus; j++)
- seq_printf( p, "%10u ",
- kstat.irqs[cpu_logical_map(j)][i]);
-#endif
- seq_printf(p, " %14s", ioinfo[i]->irq_desc.handler->typename);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- {
- seq_printf(p, ", %s", action->name);
-
- } /* endfor */
+ seq_printf(p, " %s", ioinfo[i]->irq_desc.name);
seq_putc(p, '\n');
} /* endfor */
- seq_printf(p, "NMI: %10u\n", nmi_counter);
-#ifdef CONFIG_SMP
- seq_printf(p, "IPI: %10u\n", atomic_read(&ipi_count));
-#endif
-
return 0;
}
*/
#define check_smp_invalidate(cpu)
+extern void show_stack(unsigned long* esp);
+
static void show(char * str)
{
- int i;
- unsigned long *stack;
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
atomic_read(&global_irq_count),local_irq_count(smp_processor_id()));
printk("bh: %d [%d]\n",
atomic_read(&global_bh_count),local_bh_count(smp_processor_id()));
- stack = (unsigned long *) &str;
- for (i = 40; i ; i--) {
- unsigned long x = *++stack;
- if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
- printk("<[%08lx]> ", x);
- }
- }
+ show_stack(NULL);
}
#define MAXCOUNT 100000000
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/sysctl.h>
+#include <linux/binfmts.h>
#include <asm/types.h>
#include <asm/ipc.h>
#include <asm/semaphore.h>
#include <net/scm.h>
+#include <net/sock.h>
#include "linux32.h"
struct msgbuf32 { s32 mtype; char mtext[1]; };
+struct ipc64_perm_ds32
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t32 mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
struct ipc_perm32
{
key_t key;
};
struct semid64_ds32 {
- struct ipc64_perm sem_perm; /* this structure is the same on sparc32 and sparc64 */
+ struct ipc64_perm_ds32 sem_perm;
unsigned int __pad1;
__kernel_time_t32 sem_otime;
unsigned int __pad2;
};
struct msqid64_ds32 {
- struct ipc64_perm msg_perm;
+ struct ipc64_perm_ds32 msg_perm;
unsigned int __pad1;
__kernel_time_t32 msg_stime;
unsigned int __pad2;
};
struct shmid64_ds32 {
- struct ipc64_perm shm_perm;
- unsigned int __pad1;
+ struct ipc64_perm_ds32 shm_perm;
+ __kernel_size_t32 shm_segsz;
__kernel_time_t32 shm_atime;
- unsigned int __pad2;
+ unsigned int __unused1;
__kernel_time_t32 shm_dtime;
- unsigned int __pad3;
+ unsigned int __unused2;
__kernel_time_t32 shm_ctime;
- __kernel_size_t32 shm_segsz;
+ unsigned int __unused3;
__kernel_pid_t32 shm_cpid;
__kernel_pid_t32 shm_lpid;
unsigned int shm_nattch;
- unsigned int __unused1;
- unsigned int __unused2;
+ unsigned int __unused4;
+ unsigned int __unused5;
};
static int do_sys32_msgsnd (int first, int second, int third, void *uptr)
{
- struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
+ struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf), GFP_USER);
struct msgbuf32 *up = (struct msgbuf32 *)uptr;
mm_segment_t old_fs;
int err;
if (!p)
return -ENOMEM;
+
+	err = -EINVAL;
+	if (second > MSGMAX || first < 0 || second < 0)
+		goto out;
+
+ err = -EFAULT;
+ if (!uptr)
+ goto out;
+
err = get_user (p->mtype, &up->mtype);
err |= __copy_from_user (p->mtext, &up->mtext, second);
if (err)
mm_segment_t old_fs;
int err;
+ if (first < 0 || second < 0)
+ return -EINVAL;
+
if (!version) {
struct ipc_kludge_32 *uipck = (struct ipc_kludge_32 *)uptr;
struct ipc_kludge_32 ipck;
msgtyp = ipck.msgtyp;
}
err = -ENOMEM;
- p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
+ p = kmalloc (second + sizeof (struct msgbuf), GFP_USER);
if (!p)
goto out;
old_fs = get_fs ();
set_fs (KERNEL_DS);
- err = sys_msgrcv (first, p, second + 4, msgtyp, third);
+ err = sys_msgrcv (first, p, second, msgtyp, third);
set_fs (old_fs);
if (err < 0)
goto free_then_out;
{
switch (cmd) {
case F_GETLK:
- case F_SETLK:
- case F_SETLKW:
{
struct flock f;
mm_segment_t old_fs;
long ret;
- if(get_flock(&f, (struct flock32 *)arg))
+ if(get_flock(&f, (struct flock32 *)A(arg)))
return -EFAULT;
old_fs = get_fs(); set_fs (KERNEL_DS);
ret = sys_fcntl(fd, cmd, (unsigned long)&f);
set_fs (old_fs);
if (ret) return ret;
if (f.l_start >= 0x7fffffffUL ||
- f.l_len >= 0x7fffffffUL ||
f.l_start + f.l_len >= 0x7fffffffUL)
return -EOVERFLOW;
- if(put_flock(&f, (struct flock32 *)arg))
+ if(put_flock(&f, (struct flock32 *)A(arg)))
return -EFAULT;
return 0;
}
+ case F_SETLK:
+ case F_SETLKW:
+ {
+ struct flock f;
+ mm_segment_t old_fs;
+ long ret;
+
+ if(get_flock(&f, (struct flock32 *)A(arg)))
+ return -EFAULT;
+ old_fs = get_fs(); set_fs (KERNEL_DS);
+ ret = sys_fcntl(fd, cmd, (unsigned long)&f);
+ set_fs (old_fs);
+ if (ret) return ret;
+ return 0;
+ }
default:
return sys_fcntl(fd, cmd, (unsigned long)arg);
}
timeout = MAX_SCHEDULE_TIMEOUT;
if (tvp) {
- time_t sec, usec;
+ int sec, usec;
if ((ret = verify_area(VERIFY_READ, tvp, sizeof(*tvp)))
|| (ret = __get_user(sec, &tvp->tv_sec))
ret = do_select(n, &fds, &timeout);
if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
- time_t sec = 0, usec = 0;
+ int sec = 0, usec = 0;
if (timeout) {
sec = timeout / HZ;
usec = timeout % HZ;
return ret;
}
-#define RLIM_INFINITY32 0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
+#define RLIM_OLD_INFINITY32 0x7fffffff
+#define RLIM_INFINITY32 0xffffffff
+#define RESOURCE32_OLD(x) ((x > RLIM_OLD_INFINITY32) ? RLIM_OLD_INFINITY32 : x)
+#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
u32 rlim_cur;
u32 rlim_max;
};
-extern asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit *rlim);
+extern asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim);
asmlinkage int sys32_old_getrlimit(unsigned int resource, struct rlimit32 *rlim)
{
mm_segment_t old_fs = get_fs ();
set_fs (KERNEL_DS);
- ret = sys_old_getrlimit(resource, &r);
+ ret = sys_getrlimit(resource, &r);
+ set_fs (old_fs);
+ if (!ret) {
+ ret = put_user (RESOURCE32_OLD(r.rlim_cur), &rlim->rlim_cur);
+ ret |= __put_user (RESOURCE32_OLD(r.rlim_max), &rlim->rlim_max);
+ }
+ return ret;
+}
+
+asmlinkage int sys32_getrlimit(unsigned int resource, struct rlimit32 *rlim)
+{
+ struct rlimit r;
+ int ret;
+ mm_segment_t old_fs = get_fs ();
+
+ set_fs (KERNEL_DS);
+ ret = sys_getrlimit(resource, &r);
set_fs (old_fs);
if (!ret) {
ret = put_user (RESOURCE32(r.rlim_cur), &rlim->rlim_cur);
kmsg->msg_control = (void *) orig_cmsg_uptr;
}
+#if 0
asmlinkage int sys32_sendmsg(int fd, struct msghdr32 *user_msg, unsigned user_flags)
{
struct socket *sock;
return err;
return len;
}
+#endif
+
+/*
+ * BSD sendmsg interface
+ */
+
+int sys32_sendmsg(int fd, struct msghdr32 *msg, unsigned flags)
+{
+ struct socket *sock;
+ char address[MAX_SOCK_ADDR];
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */
+ unsigned char *ctl_buf = ctl;
+ struct msghdr msg_sys;
+ int err, ctl_len, iov_size, total_len;
+
+ err = -EFAULT;
+ if (msghdr_from_user32_to_kern(&msg_sys, msg))
+ goto out;
+
+ sock = sockfd_lookup(fd, &err);
+ if (!sock)
+ goto out;
+
+ /* do not move before msg_sys is valid */
+ err = -EINVAL;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area*/
+ err = -ENOMEM;
+	iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /* This will also move the address data into kernel space */
+ err = verify_iovec32(&msg_sys, iov, address, VERIFY_READ);
+ if (err < 0)
+ goto out_freeiov;
+ total_len = err;
+
+ err = -ENOBUFS;
+
+ if (msg_sys.msg_controllen > INT_MAX)
+ goto out_freeiov;
+ ctl_len = msg_sys.msg_controllen;
+ if (ctl_len)
+ {
+ if (ctl_len > sizeof(ctl))
+ {
+ ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
+ if (ctl_buf == NULL)
+ goto out_freeiov;
+ }
+ else if (ctl_len < sizeof(struct cmsghdr))
+ {
+ /* to get same error message as on 31 bit native */
+			err = -EOPNOTSUPP;
+ goto out_freeiov;
+ }
+ err = -EFAULT;
+ if (cmsghdr_from_user32_to_kern(&msg_sys, ctl_buf, ctl_len))
+ goto out_freectl;
+// msg_sys.msg_control = ctl_buf;
+ }
+ msg_sys.msg_flags = flags;
+
+ if (sock->file->f_flags & O_NONBLOCK)
+ msg_sys.msg_flags |= MSG_DONTWAIT;
+ err = sock_sendmsg(sock, &msg_sys, total_len);
+
+out_freectl:
+ if (ctl_buf != ctl)
+ sock_kfree_s(sock->sk, ctl_buf, ctl_len);
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ sockfd_put(sock);
+out:
+ return err;
+}
+
+/*
+ * BSD recvmsg interface
+ */
+
+int
+sys32_recvmsg (int fd, struct msghdr32 *msg, unsigned int flags)
+{
+ struct socket *sock;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov=iovstack;
+ struct msghdr msg_sys;
+ unsigned long cmsg_ptr;
+ int err, iov_size, total_len, len;
+
+ /* kernel mode address */
+ char addr[MAX_SOCK_ADDR];
+
+ /* user mode address pointers */
+ struct sockaddr *uaddr;
+ int *uaddr_len;
+ struct scm_cookie scm;
+
+ err=-EFAULT;
+ if (msghdr_from_user32_to_kern(&msg_sys, msg))
+ goto out;
+
+ sock = sockfd_lookup(fd, &err);
+ if (!sock)
+ goto out;
+
+ err = -EINVAL;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area*/
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /*
+ * Save the user-mode address (verify_iovec will change the
+ * kernel msghdr to use the kernel address space)
+ */
+
+ uaddr = msg_sys.msg_name;
+ uaddr_len = &msg->msg_namelen;
+ err = verify_iovec32(&msg_sys, iov, addr, VERIFY_WRITE);
+ if (err < 0)
+ goto out_freeiov;
+ total_len=err;
+
+ cmsg_ptr = (unsigned long)msg_sys.msg_control;
+ msg_sys.msg_flags = 0;
+
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ memset(&scm, 0, sizeof(scm));
+ err = sock->ops->recvmsg(sock, &msg_sys, total_len,
+ flags, &scm);
+ if (err < 0)
+ goto out_freeiov;
+ len = err;
+
+ if (uaddr != NULL &&
+ /* in order to get same error message as on native 31 bit */
+ msg_sys.msg_namelen > 0) {
+ err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr, uaddr_len);
+ if (err < 0)
+ goto out_freeiov;
+ }
+ if(!msg_sys.msg_control) {
+ if(sock->passcred || scm.fp)
+ msg_sys.msg_flags |= MSG_CTRUNC;
+ if(scm.fp)
+ __scm_destroy(&scm);
+ } else {
+ /* If recvmsg processing itself placed some
+		 * control messages into user space, it is
+ * using 64-bit CMSG processing, so we need
+ * to fix it up before we tack on more stuff.
+ */
+ if((unsigned long) msg_sys.msg_control != cmsg_ptr)
+ cmsg32_recvmsg_fixup(&msg_sys, cmsg_ptr);
+ /* Wheee... */
+ if(sock->passcred)
+ put_cmsg32(&msg_sys,
+ SOL_SOCKET, SCM_CREDENTIALS,
+ sizeof(scm.creds), &scm.creds);
+ if(scm.fp != NULL)
+ scm_detach_fds32(&msg_sys, &scm);
+ }
+
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ sockfd_put(sock);
+out:
+ return err;
+}
extern asmlinkage int sys_setsockopt(int fd, int level, int optname,
char *optval, int optlen);
*/
static int nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32)
{
- return copy_to_user(res32, kres, sizeof(*res32));
+ return copy_to_user(res32, kres, sizeof(*res32)) ? -EFAULT : 0;
}
/*
return sys_pwrite(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}
+extern asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count);
+
+asmlinkage ssize_t32 sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
+{
+ return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
+}
extern asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count);
return ret;
}
+extern asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd,
+ loff_t *offset, size_t count);
+
+asmlinkage int sys32_sendfile64(int out_fd, int in_fd,
+ __kernel_loff_t32 *offset, s32 count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ loff_t lof;
+
+ if (offset && get_user(lof, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile64(out_fd, in_fd, offset ? &lof : NULL, count);
+ set_fs(old_fs);
+
+ if (offset && put_user(lof, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
/* Handle adjtimex compatibility. */
struct timex32 {
return ret;
}
+extern asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long *user_mask_ptr);
+
+asmlinkage int sys32_sched_setaffinity(__kernel_pid_t32 pid, unsigned int len,
+ u32 *user_mask_ptr)
+{
+ unsigned long kernel_mask;
+ mm_segment_t old_fs;
+ int ret;
+
+ if (get_user(kernel_mask, user_mask_ptr))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_sched_setaffinity(pid,
+ /* XXX Nice api... */
+ sizeof(kernel_mask),
+ &kernel_mask);
+ set_fs(old_fs);
+
+ return ret;
+}
+
+extern asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long *user_mask_ptr);
+
+asmlinkage int sys32_sched_getaffinity(__kernel_pid_t32 pid, unsigned int len,
+ u32 *user_mask_ptr)
+{
+ unsigned long kernel_mask;
+ mm_segment_t old_fs;
+ int ret;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_sched_getaffinity(pid,
+ /* XXX Nice api... */
+ sizeof(kernel_mask),
+ &kernel_mask);
+ set_fs(old_fs);
+
+ if (ret == 0) {
+ if (put_user(kernel_mask, user_mask_ptr))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
/*
- * The idle loop on a S390...
+ * Return saved PC of a blocked thread. Used in kernel/sched.
+ * resume in entry.S does not create a new stack frame, it
+ * just stores the registers %r6-%r15 to the frame given by
+ * schedule. We want to return the address of the caller of
+ * schedule, so we have to walk the backchain once to
+ * find the frame in which schedule() stored its return address.
*/
-
-static psw_t wait_psw;
-
-int cpu_idle(void *unused)
+unsigned long thread_saved_pc(struct task_struct *tsk)
{
- /* endless idle loop with no priority at all */
- init_idle();
- current->nice = 20;
- wait_psw.mask = _WAIT_PSW_MASK;
- wait_psw.addr = (unsigned long) &&idle_wakeup;
- while(1) {
- if (need_resched()) {
- schedule();
- check_pgt_cache();
- continue;
- }
-
- /* load wait psw */
- asm volatile (
- "lpswe %0"
- : : "m" (wait_psw) );
-idle_wakeup:
- }
+ unsigned long bc;
+
+ bc = *((unsigned long *) tsk->thread.ksp);
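+	/* Offset 112 in the 64 bit stack frame is the save slot
+	   of %r14, i.e. the return address of the caller. */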
+ return *((unsigned long *) (bc+112));
}
/*
- As all the register will only be made displayable to the root
- user ( via printk ) or checking if the uid of the user is 0 from
- the /proc filesystem please god this will be secure enough DJB.
- The lines are given one at a time so as not to chew stack space in
- printk on a crash & also for the proc filesystem when you get
- 0 returned you know you've got all the lines
+ * The idle loop on a S390...
*/
-static int sprintf_regs(int line, char *buff, struct task_struct *task, struct pt_regs *regs)
+void default_idle(void)
{
- int linelen=0;
- int regno,chaincnt;
- u64 backchain,prev_backchain,endchain;
- u64 ksp = 0;
- char *mode = "???";
-
- enum
- {
- sp_linefeed,
- sp_psw,
- sp_ksp,
- sp_gprs,
- sp_gprs1,
- sp_gprs2,
- sp_gprs3,
- sp_gprs4,
- sp_gprs5,
- sp_gprs6,
- sp_gprs7,
- sp_gprs8,
- sp_acrs,
- sp_acrs1,
- sp_acrs2,
- sp_acrs3,
- sp_acrs4,
- sp_kern_backchain,
- sp_kern_backchain1
- };
-
- if (task)
- ksp = task->thread.ksp;
- if (regs && !(regs->psw.mask & PSW_PROBLEM_STATE))
- ksp = regs->gprs[15];
-
- if (regs)
- mode = (regs->psw.mask & PSW_PROBLEM_STATE)?
- "User" : "Kernel";
-
- switch(line)
- {
- case sp_linefeed:
- linelen=sprintf(buff,"\n");
- break;
- case sp_psw:
- if(regs)
- linelen=sprintf(buff, "%s PSW: %016lx %016lx %s\n", mode,
- (unsigned long) regs->psw.mask,
- (unsigned long) regs->psw.addr,
- print_tainted());
- else
- linelen=sprintf(buff,"pt_regs=NULL some info unavailable\n");
- break;
- case sp_ksp:
- linelen=sprintf(&buff[linelen],
- "task: %016lx ksp: %016lx pt_regs: %016lx\n",
- (addr_t)task, (addr_t)ksp, (addr_t)regs);
- break;
- case sp_gprs:
- if(regs)
- linelen=sprintf(buff, "%s GPRS:\n", mode);
- break;
- case sp_gprs1 ... sp_gprs8:
- if(regs)
- {
- regno=(line-sp_gprs1)*2;
- linelen = sprintf(buff,"%016lx %016lx\n",
- regs->gprs[regno],
- regs->gprs[regno+1]);
- }
- break;
- case sp_acrs:
- if(regs)
- linelen=sprintf(buff, "%s ACRS:\n", mode);
- break;
- case sp_acrs1 ... sp_acrs4:
- if(regs)
- {
- regno=(line-sp_acrs1)*4;
- linelen=sprintf(buff,"%08x %08x %08x %08x\n",
- regs->acrs[regno],
- regs->acrs[regno+1],
- regs->acrs[regno+2],
- regs->acrs[regno+3]);
- }
- break;
- case sp_kern_backchain:
- if (regs && (regs->psw.mask & PSW_PROBLEM_STATE))
- break;
- if (ksp)
- linelen=sprintf(buff, "Kernel BackChain CallChain\n");
- break;
- default:
- if (ksp)
- {
-
- backchain=ksp&PSW_ADDR_MASK;
- endchain=((backchain&(-THREAD_SIZE))+THREAD_SIZE);
- prev_backchain=backchain-1;
- line-=sp_kern_backchain1;
- for(chaincnt=0;;chaincnt++)
- {
- if((backchain==0)||(backchain>=endchain)
- ||(chaincnt>=8)||(prev_backchain>=backchain))
- break;
- if(chaincnt==line)
- {
- linelen+=sprintf(&buff[linelen]," %016lx [<%016lx>]\n",
- backchain,
- *(u64 *)(backchain+112)&PSW_ADDR_MASK);
- break;
- }
- prev_backchain=backchain;
- backchain=(*((u64 *)backchain))&PSW_ADDR_MASK;
- }
- }
- }
- return(linelen);
+ psw_t wait_psw;
+ unsigned long reg;
+
+ if (need_resched()) {
+ schedule();
+ return;
+ }
+
+ /*
+ * Wait for external, I/O or machine check interrupt and
+	 * switch off the machine check bit after the wait has ended.
+ */
+ wait_psw.mask = _WAIT_PSW_MASK;
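+	/*
+	 * The first lpswe loads an enabled wait PSW that resumes at
+	 * label 0 once the wait has been ended by an interrupt. The
+	 * PSW is then rebuilt to continue at label 1 with the wait
+	 * and machine check mask bits cleared and loaded once more.
+	 */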
+ asm volatile (
+ " larl %0,0f\n"
+ " stg %0,8(%1)\n"
+ " lpswe 0(%1)\n"
+ "0: larl %0,1f\n"
+ " stg %0,8(%1)\n"
+ " ni 1(%1),0xf9\n"
+ " lpswe 0(%1)\n"
+ "1:"
+ : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
}
-void show_regs(struct pt_regs *regs)
+int cpu_idle(void)
{
- char buff[80];
- int i, line;
-
- printk("CPU: %d\n",smp_processor_id());
- printk("Process %s (pid: %d, stackpage=%016lX)\n",
- current->comm, current->pid, 4096+(addr_t)current);
-
- for (line = 0; sprintf_regs(line, buff, current, regs); line++)
- printk(buff);
-
- if (regs->psw.mask & PSW_PROBLEM_STATE)
- {
- printk("User Code:\n");
- memset(buff, 0, 20);
- copy_from_user(buff,
- (char *) (regs->psw.addr & PSW_ADDR_MASK), 20);
- for (i = 0; i < 20; i++)
- printk("%02x ", buff[i]);
- printk("\n");
- }
+ for (;;)
+ default_idle();
+ return 0;
}
-char *task_show_regs(struct task_struct *task, char *buffer)
+extern void show_registers(struct pt_regs *regs);
+extern void show_trace(unsigned long *sp);
+
+void show_regs(struct pt_regs *regs)
{
- int line, len;
+ struct task_struct *tsk = current;
- for (line = 0; ; line++)
- {
- len = sprintf_regs(line, buffer, task, NULL);
- if (!len) break;
- buffer += len;
- }
- return buffer;
+ printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted());
+ printk("Process %s (pid: %d, task: %016lx, ksp: %016lx)\n",
+ current->comm, current->pid, (unsigned long) tsk,
+ tsk->thread.ksp);
+
+ show_registers(regs);
+ /* Show stack backtrace if pt_regs is from kernel mode */
+ if (!(regs->psw.mask & PSW_PROBLEM_STATE))
+ show_trace((unsigned long *) regs->gprs[15]);
}
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
current->used_math = 0;
- current->flags &= ~PF_USEDFPU;
+ clear_tsk_thread_flag(current, TIF_USEDFPU);
}
void release_thread(struct task_struct *dead_task)
unsigned long gprs[10]; /* gprs 6 -15 */
unsigned long fprs[2]; /* fpr 4 and 6 */
unsigned long empty[2];
-#if CONFIG_REMOTE_DEBUG
- struct gdb_pt_regs childregs;
-#else
struct pt_regs childregs;
-#endif
} *frame;
- frame = (struct stack_frame *) (4*PAGE_SIZE + (unsigned long) p) -1;
- frame = (struct stack_frame *) (((unsigned long) frame)&-8L);
- p->thread.regs = &frame->childregs;
+ frame = ((struct stack_frame *)
+ (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
p->thread.ksp = (unsigned long) frame;
frame->childregs = *regs;
frame->childregs.gprs[15] = new_stackp;
frame->gprs[8] = (unsigned long) &ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */
- frame->gprs[9] = (unsigned long) frame;
- frame->childregs.old_ilc = -1; /* We are not single stepping an svc */
+ frame->gprs[9] = (unsigned long) frame;
/* save fprs, if used in last task */
save_fp_regs(&p->thread.fp_regs);
p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#ifdef CONFIG_S390_SUPPORT
+#include "linux32.h"
+#else
+#define parent_31bit 0
+#endif
+
void FixPerRegisters(struct task_struct *task)
{
- struct pt_regs *regs = task->thread.regs;
+ struct pt_regs *regs = __KSTK_PTREGS(task);
per_struct *per_info=
(per_struct *)&task->thread.per_info;
return retval;
}
+#ifdef CONFIG_S390_SUPPORT
+
+typedef struct
+{
+ __u32 cr[3];
+} per_cr_words32 __attribute__((packed));
+
+typedef struct
+{
+ __u16 perc_atmid; /* 0x096 */
+ __u32 address; /* 0x098 */
+ __u8 access_id; /* 0x0a1 */
+} per_lowcore_words32 __attribute__((packed));
+
+typedef struct
+{
+ union {
+ per_cr_words32 words;
+ } control_regs __attribute__((packed));
+ /*
+ * Use these flags instead of setting em_instruction_fetch
+	 * directly; they are used so that single stepping can be
+	 * switched on & off without affecting other tracing.
+ */
+ unsigned single_step : 1;
+ unsigned instruction_fetch : 1;
+ unsigned : 30;
+ /*
+ * These addresses are copied into cr10 & cr11 if single
+ * stepping is switched off
+ */
+ __u32 starting_addr;
+ __u32 ending_addr;
+ union {
+ per_lowcore_words32 words;
+ } lowcore;
+} per_struct32 __attribute__((packed));
+
+struct user_regs_struct32
+{
+ _psw_t32 psw;
+ u32 gprs[NUM_GPRS];
+ u32 acrs[NUM_ACRS];
+ u32 orig_gpr2;
+ s390_fp_regs fp_regs;
+ /*
+ * These per registers are in here so that gdb can modify them
+ * itself as there is no "official" ptrace interface for hardware
+ * watchpoints. This is the way intel does it.
+ */
+ per_struct32 per_info;
+ u32 ieee_instruction_pointer;
+ /* Used to give failing instruction back to user for ieee exceptions */
+};
+
+struct user32 {
+ /* We start with the registers, to mimic the way that "memory" is returned
+ from the ptrace(3,...) function. */
+ struct user_regs_struct32 regs; /* Where the registers are actually stored */
+ /* The rest of this junk is to help gdb figure out what goes where */
+ u32 u_tsize; /* Text segment size (pages). */
+ u32 u_dsize; /* Data segment size (pages). */
+ u32 u_ssize; /* Stack segment size (pages). */
+ u32 start_code; /* Starting virtual address of text. */
+ u32 start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ s32 signal; /* Signal that caused the core dump. */
+ u32 u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ u32 magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+};
+
+
+#define PT32_PSWMASK 0x0
+#define PT32_PSWADDR 0x04
+#define PT32_GPR0 0x08
+#define PT32_GPR15 0x44
+#define PT32_ACR0 0x48
+#define PT32_ACR15 0x84
+#define PT32_ORIGGPR2 0x88
+#define PT32_FPC 0x90
+#define PT32_FPR0_HI 0x98
+#define PT32_FPR15_LO 0x114
+#define PT32_CR_9 0x118
+#define PT32_CR_11 0x120
+#define PT32_IEEE_IP 0x13C
+#define PT32_LASTOFF PT32_IEEE_IP
+#define PT32_ENDREGS (0x140-1)
+#define U32OFFSETOF(member) offsetof(struct user32,regs.member)
+#define U64OFFSETOF(member) offsetof(struct user,regs.member)
+#define U6432DIFF(member) (U64OFFSETOF(member) - U32OFFSETOF(member))
+#define PT_SINGLE_STEP (PT_CR_11+8)
+#define PT32_SINGLE_STEP (PT32_CR_11+4)
+
+#endif /* CONFIG_S390_SUPPORT */
+
int copy_user(struct task_struct *task,saddr_t useraddr, addr_t copyaddr,
int len, int tofromuser, int writingtouser)
{
int copylen=0,copymax;
addr_t realuseraddr;
saddr_t enduseraddr;
-
unsigned long mask;
-
#ifdef CONFIG_S390_SUPPORT
- if (current->thread.flags & S390_FLAG_31BIT) {
- /* adjust user offsets to 64 bit structure */
- if (useraddr < PT_PSWADDR / 2)
- useraddr = 2 * useraddr;
- else if(useraddr < PT_ACR0 / 2)
- useraddr = 2 * useraddr + sizeof(addr_t) / 2;
- else if(useraddr < PT_ACR0 / 2 + (PT_ORIGGPR2 - PT_ACR0))
- useraddr = useraddr + PT_ACR0 / 2;
- else if(useraddr < PT_ACR0 / 2 + (sizeof(struct user_regs_struct) - sizeof(addr_t) / 2 - PT_ACR0))
- useraddr = useraddr + PT_ACR0 / 2 + sizeof(addr_t) / 2;
- }
-#endif
-
+ int parent_31bit=current->thread.flags & S390_FLAG_31BIT;
+ int skip;
+#endif
enduseraddr=useraddr+len;
-
- if (useraddr < 0 || enduseraddr > sizeof(struct user)||
- (useraddr < PT_ENDREGS && (useraddr&3))||
- (enduseraddr < PT_ENDREGS && (enduseraddr&3)))
+ if ((useraddr<0||useraddr&3||enduseraddr&3)||
+#ifdef CONFIG_S390_SUPPORT
+ (parent_31bit && enduseraddr > sizeof(struct user32)) ||
+#endif
+ enduseraddr > sizeof(struct user))
return (-EIO);
+
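+	/*
+	 * A 31 bit parent addresses the 32 bit user structure; map its
+	 * offsets onto the 64 bit layout. The registers double in size,
+	 * so word offsets are doubled and +4 selects the low half,
+	 * e.g. gpr2 moves from PT32_GPR0+8 to PT_GPR0+16+4.
+	 */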
+#ifdef CONFIG_S390_SUPPORT
+ if(parent_31bit)
+ {
+ if(useraddr != PT32_PSWMASK)
+ {
+ if (useraddr == PT32_PSWADDR)
+ useraddr = PT_PSWADDR+4;
+ else if(useraddr <= PT32_GPR15)
+ useraddr = ((useraddr-PT32_GPR0)*2) + PT_GPR0+4;
+ else if(useraddr <= PT32_ACR15)
+ useraddr += PT_ACR0-PT32_ACR0;
+ else if(useraddr == PT32_ORIGGPR2)
+ useraddr = PT_ORIGGPR2+4;
+ else if(useraddr <= PT32_FPR15_LO)
+ useraddr += PT_FPR0-PT32_FPR0_HI;
+ else if(useraddr <= PT32_CR_11)
+ useraddr = ((useraddr-PT32_CR_9)*2) + PT_CR_9+4;
+ else if(useraddr == PT32_SINGLE_STEP)
+ useraddr = PT_SINGLE_STEP;
+ else if(useraddr <= U32OFFSETOF(per_info.ending_addr))
+ useraddr = (((useraddr-U32OFFSETOF(per_info.starting_addr)))*2) +
+ U64OFFSETOF(per_info.starting_addr)+4;
+ else if( useraddr == U32OFFSETOF(per_info.lowcore.words.perc_atmid))
+ useraddr = U64OFFSETOF(per_info.lowcore.words.perc_atmid);
+ else if( useraddr == U32OFFSETOF(per_info.lowcore.words.address))
+ useraddr = U64OFFSETOF(per_info.lowcore.words.address)+4;
+ else if(useraddr == U32OFFSETOF(per_info.lowcore.words.access_id))
+ useraddr = U64OFFSETOF(per_info.lowcore.words.access_id);
+ else if(useraddr == PT32_IEEE_IP)
+ useraddr = PT_IEEE_IP+4;
+ }
+ }
+#endif /* CONFIG_S390_SUPPORT */
+
while(len>0)
{
+#ifdef CONFIG_S390_SUPPORT
+ skip=0;
+#endif
mask=PSW_ADDR_MASK;
if(useraddr<PT_FPC)
{
- realuseraddr=(addr_t)&(((u8 *)task->thread.regs)[useraddr]);
- if(useraddr<PT_PSWMASK)
+ realuseraddr=((addr_t) __KSTK_PTREGS(task)) + useraddr;
+ if(useraddr<(PT_PSWMASK+8))
{
- copymax=PT_PSWMASK;
- }
- else if(useraddr<(PT_PSWMASK+8))
- {
- copymax=(PT_PSWMASK+8);
+ if(parent_31bit)
+ {
+ copymax=PT_PSWMASK+4;
+#ifdef CONFIG_S390_SUPPORT
+ skip=8;
+#endif
+ }
+ else
+ {
+ copymax=PT_PSWMASK+8;
+ }
if(writingtouser)
mask=PSW_MASK_DEBUGCHANGE;
}
{
copymax=PT_PSWADDR+8;
mask=PSW_ADDR_DEBUGCHANGE;
+#ifdef CONFIG_S390_SUPPORT
+ if(parent_31bit)
+ skip=4;
+#endif
+
}
else
- copymax=PT_FPC;
-
+ {
+#ifdef CONFIG_S390_SUPPORT
+ if(parent_31bit && useraddr <= PT_GPR15+4)
+ {
+ copymax=useraddr+4;
+ if(useraddr<PT_GPR15+4)
+ skip=4;
+ }
+ else
+#endif
+ copymax=PT_FPC;
+ }
}
else if(useraddr<(PT_FPR15+sizeof(freg_t)))
{
}
else if(useraddr<sizeof(struct user_regs_struct))
{
- copymax=sizeof(struct user_regs_struct);
+#ifdef CONFIG_S390_SUPPORT
+ if( parent_31bit && useraddr <= PT_IEEE_IP+4)
+ {
+ switch(useraddr)
+ {
+ case PT_CR_11+4:
+ case U64OFFSETOF(per_info.ending_addr)+4:
+ case U64OFFSETOF(per_info.lowcore.words.address)+4:
+ copymax=useraddr+4;
+ break;
+ case PT_SINGLE_STEP:
+ case U64OFFSETOF(per_info.lowcore.words.perc_atmid):
+					/* We copy 2 bytes in excess for the atmid member; this */
+					/* also works around its alignment in the 32 bit layout. */
+ skip=8;
+ copymax=useraddr+4;
+ break;
+ default:
+ copymax=useraddr+4;
+ skip=4;
+ }
+ }
+ else
+#endif
+ {
+ copymax=sizeof(struct user_regs_struct);
+ }
realuseraddr=(addr_t)&(((u8 *)&task->thread.per_info)[useraddr-PT_CR_9]);
}
else
return (-EIO);
copyaddr+=copylen;
len-=copylen;
- useraddr+=copylen;
+ useraddr+=copylen
+#ifdef CONFIG_S390_SUPPORT
+ +skip
+#endif
+ ;
}
FixPerRegisters(task);
return(0);
clear_single_step(child);
}
-asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
-{
- struct task_struct *child;
- int ret = -EPERM;
- unsigned long flags;
- unsigned long tmp;
- int copied;
- ptrace_area parea;
-
- lock_kernel();
- if (request == PTRACE_TRACEME)
- {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out;
- if (request == PTRACE_ATTACH)
- {
- ret = ptrace_attach(child);
- goto out;
- }
- ret = -ESRCH;
- // printk("child=%lX child->flags=%lX",child,child->flags);
- /* I added child!=current line so we can get the */
- /* ieee_instruction_pointer from the user structure DJB */
- if(child!=current)
- {
- if (!(child->ptrace & PT_PTRACED))
- goto out;
- if (child->state != TASK_STOPPED)
- {
- if (request != PTRACE_KILL)
- goto out;
- }
- if (child->p_pptr != current)
- goto out;
- }
- switch (request)
- {
- /* If I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- goto out;
- ret = put_user(tmp,(unsigned long *) data);
- goto out;
-
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR:
- ret=copy_user(child,addr,data,sizeof(unsigned long),1,0);
- break;
-
- /* If I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- goto out;
- ret = -EIO;
- goto out;
- break;
-
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- ret=copy_user(child,addr,(addr_t)&data,sizeof(unsigned long),0,1);
- break;
-
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: /* restart after signal. */
- ret = -EIO;
- if ((unsigned long) data >= _NSIG)
- break;
- if (request == PTRACE_SYSCALL)
- child->ptrace |= PT_TRACESYS;
- else
- child->ptrace &= ~PT_TRACESYS;
- child->exit_code = data;
- /* make sure the single step bit is not set. */
- clear_single_step(child);
- wake_up_process(child);
- ret = 0;
- break;
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->state == TASK_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- clear_single_step(child);
- wake_up_process(child);
- /* make sure the single step bit is not set. */
- break;
-
- case PTRACE_SINGLESTEP: /* set the trap flag. */
- ret = -EIO;
- if ((unsigned long) data >= _NSIG)
- break;
- child->ptrace &= ~PT_TRACESYS;
- child->exit_code = data;
- set_single_step(child);
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- break;
-
- case PTRACE_DETACH: /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
- break;
-
- case PTRACE_PEEKUSR_AREA:
- case PTRACE_POKEUSR_AREA:
- if((ret=copy_from_user(&parea,(void *)addr,sizeof(parea)))==0)
- ret=copy_user(child,parea.kernel_addr,parea.process_addr,
- parea.len,1,(request==PTRACE_POKEUSR_AREA));
- break;
- default:
- ret = -EIO;
- break;
- }
- out:
- unlock_kernel();
- return ret;
-}
-
typedef struct
{
__u32 len;
__u32 process_addr;
} ptrace_area_emu31;
-asmlinkage int sys32_ptrace(long request, long pid, long addr, s32 data)
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
int ret = -EPERM;
- unsigned long flags;
- u32 tmp;
int copied;
- ptrace_area parea;
-
+#ifdef CONFIG_S390_SUPPORT
+ int parent_31bit;
+ int sizeof_parent_long;
+ u8 *dataptr;
+#else
+#define sizeof_parent_long 8
+#define dataptr (u8 *)&data
+#endif
lock_kernel();
if (request == PTRACE_TRACEME)
{
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* you may not mess with init */
- goto out;
+ goto out_tsk;
if (request == PTRACE_ATTACH)
{
ret = ptrace_attach(child);
- goto out;
+ goto out_tsk;
}
ret = -ESRCH;
// printk("child=%lX child->flags=%lX",child,child->flags);
if(child!=current)
{
if (!(child->ptrace & PT_PTRACED))
- goto out;
+ goto out_tsk;
if (child->state != TASK_STOPPED)
{
if (request != PTRACE_KILL)
- goto out;
+ goto out_tsk;
}
- if (child->p_pptr != current)
- goto out;
+ if (child->parent != current)
+ goto out_tsk;
}
+#ifdef CONFIG_S390_SUPPORT
+ parent_31bit=(current->thread.flags & S390_FLAG_31BIT);
+ sizeof_parent_long=(parent_31bit ? 4:8);
+ dataptr=&(((u8 *)&data)[parent_31bit ? 4:0]);
+#endif
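+	/*
+	 * s390 is big endian: the 32 bit word passed by a 31 bit parent
+	 * occupies the low order four bytes of the 64 bit data argument,
+	 * so dataptr skips the first four.
+	 */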
switch (request)
{
/* If I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ {
+ u8 tmp[8];
+ copied = access_process_vm(child, addr, tmp, sizeof_parent_long, 0);
ret = -EIO;
- if (copied != sizeof(tmp))
- goto out;
- ret = put_user(tmp,(u32 *)(unsigned long)data);
- goto out;
-
+ if (copied != sizeof_parent_long)
+ break;
+ ret = copy_to_user((void *)data,tmp,sizeof_parent_long);
+ break;
+
+ }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
- ret=copy_user(child,addr,data,sizeof(u32),1,0);
+ ret=copy_user(child,addr,data,sizeof_parent_long,1,0);
break;
/* If I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- goto out;
+ if (access_process_vm(child, addr,dataptr, sizeof_parent_long, 1) == sizeof_parent_long)
+ break;
ret = -EIO;
- goto out;
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- ret=copy_user(child,addr,(addr_t)&data,sizeof(u32),0,1);
+ ret=copy_user(child,addr,(addr_t)dataptr,sizeof_parent_long,0,1);
break;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
if ((unsigned long) data >= _NSIG)
break;
if (request == PTRACE_SYSCALL)
- child->ptrace |= PT_TRACESYS;
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
- child->ptrace &= ~PT_TRACESYS;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* make sure the single step bit is not set. */
clear_single_step(child);
ret = -EIO;
if ((unsigned long) data >= _NSIG)
break;
- child->ptrace &= ~PT_TRACESYS;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
set_single_step(child);
/* give it a chance to run. */
break;
case PTRACE_DETACH: /* detach a process that was attached. */
- ret = -EIO;
- if ((unsigned long) data >= _NSIG)
- break;
- child->ptrace &= ~(PT_PTRACED|PT_TRACESYS);
- child->exit_code = data;
- write_lock_irqsave(&tasklist_lock, flags);
- REMOVE_LINKS(child);
- child->p_pptr = child->p_opptr;
- SET_LINKS(child);
- write_unlock_irqrestore(&tasklist_lock, flags);
- /* make sure the single step bit is not set. */
- clear_single_step(child);
- wake_up_process(child);
- ret = 0;
+ ret = ptrace_detach(child, data);
break;
+
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
+ if(parent_31bit)
{
- ptrace_area_emu31 * parea31 = (void *)addr;
- if (!access_ok(VERIFY_READ, parea31, sizeof(*parea31)))
- return(-EFAULT);
- ret = __get_user(parea.len, &parea31->len);
- ret |= __get_user(parea.kernel_addr, &parea31->kernel_addr);
- ret |= __get_user(parea.process_addr, &parea31->process_addr);
- if(ret==0)
- ret=copy_user(child,parea.kernel_addr,parea.process_addr,
- parea.len,1,(request==PTRACE_POKEUSR_AREA));
- break;
+ ptrace_area_emu31 parea;
+ if((ret=copy_from_user(&parea,(void *)addr,sizeof(parea)))==0)
+ ret=copy_user(child,parea.kernel_addr,parea.process_addr,
+ parea.len,1,(request==PTRACE_POKEUSR_AREA));
}
+ else
+ {
+ ptrace_area parea;
+ if((ret=copy_from_user(&parea,(void *)addr,sizeof(parea)))==0)
+ ret=copy_user(child,parea.kernel_addr,parea.process_addr,
+ parea.len,1,(request==PTRACE_POKEUSR_AREA));
+ }
+ break;
+ case PTRACE_SETOPTIONS: {
+ if (data & PTRACE_O_TRACESYSGOOD)
+ child->ptrace |= PT_TRACESYSGOOD;
+ else
+ child->ptrace &= ~PT_TRACESYSGOOD;
+ ret = 0;
+ break;
+ }
default:
ret = -EIO;
break;
}
+ out_tsk:
+ put_task_struct(child);
out:
unlock_kernel();
return ret;
}
+
+
asmlinkage void syscall_trace(void)
{
- lock_kernel();
- if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
- != (PT_PTRACED|PT_TRACESYS))
- goto out;
- current->exit_code = SIGTRAP;
- set_current_state(TASK_STOPPED);
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0);
+ current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
/*
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
- out:
- unlock_kernel();
}
oi .Lschib+5-.Lpg0(%r13),0x84
.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
msch .Lschib-.Lpg0(%r13)
- ssch .Liplorb-.Lpg0(%r13)
- jz .L001
+ lghi %r0,5
+.Lssch: ssch .Liplorb-.Lpg0(%r13)
+ jz .L001
+ brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/errno.h>
#include <asm/lowcore.h>
#include <asm/s390_ext.h>
* iucv and 0x2603 pfault) this is always the first element.
*/
ext_int_info_t *ext_int_hash[256] = { 0, };
-ext_int_info_t ext_int_info_timer;
-ext_int_info_t ext_int_info_hwc;
-ext_int_info_t ext_int_pfault;
int register_external_interrupt(__u16 code, ext_int_handler_t handler) {
ext_int_info_t *p;
int index;
- index = code & 0xff;
- p = ext_int_hash[index];
- while (p != NULL) {
- if (p->code == code)
- return -EBUSY;
- p = p->next;
- }
- if (code == 0x1004) /* time_init is done before kmalloc works :-/ */
- p = &ext_int_info_timer;
- else if (code == 0x2401) /* hwc_init is done too early too */
- p = &ext_int_info_hwc;
- else if (code == 0x2603) /* pfault_init is done too early too */
- p = &ext_int_pfault;
- else
- p = (ext_int_info_t *)
- kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
+ p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
if (p == NULL)
return -ENOMEM;
p->code = code;
p->handler = handler;
+ index = code & 0xff;
+ p->next = ext_int_hash[index];
+ ext_int_hash[index] = p;
+ return 0;
+}
+
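+/*
+ * Like register_external_interrupt, but the caller provides the
+ * ext_int_info_t storage. Needed by handlers that register before
+ * kmalloc works, e.g. the timer, hwc and pfault interrupts.
+ */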
+int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+ ext_int_info_t *p) {
+ int index;
+
+ if (p == NULL)
+ return -EINVAL;
+ p->code = code;
+ p->handler = handler;
+ index = code & 0xff;
p->next = ext_int_hash[index];
ext_int_hash[index] = p;
return 0;
q->next = p->next;
else
ext_int_hash[index] = p->next;
- if (code != 0x1004 && code != 0x2401 && code != 0x2603)
- kfree(p);
+ kfree(p);
return 0;
}
+int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+ ext_int_info_t *p) {
+ ext_int_info_t *q;
+ int index;
+
+ if (p == NULL || p->code != code || p->handler != handler)
+ return -EINVAL;
+ index = code & 0xff;
+ q = ext_int_hash[index];
+ if (p != q) {
+ while (q != NULL) {
+ if (q->next == p)
+ break;
+ q = q->next;
+ }
+ if (q == NULL)
+ return -ENOENT;
+ q->next = p->next;
+ } else
+ ext_int_hash[index] = p->next;
+ return 0;
+}
+
EXPORT_SYMBOL(register_external_interrupt);
EXPORT_SYMBOL(unregister_external_interrupt);
#include <linux/highuid.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/smp.h>
#include <asm/checksum.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
+#include <asm/softirq.h>
#if CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
EXPORT_SYMBOL_NOVERS(_oi_bitmap);
EXPORT_SYMBOL_NOVERS(_ni_bitmap);
EXPORT_SYMBOL_NOVERS(_zb_findmap);
-EXPORT_SYMBOL_NOVERS(__copy_from_user_fixup);
-EXPORT_SYMBOL_NOVERS(__copy_to_user_fixup);
+EXPORT_SYMBOL_NOVERS(__copy_from_user_asm);
+EXPORT_SYMBOL_NOVERS(__copy_to_user_asm);
+EXPORT_SYMBOL_NOVERS(__clear_user_asm);
/*
* semaphore ops
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
/*
* string functions
EXPORT_SYMBOL_NOVERS(memcmp);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(memscan);
EXPORT_SYMBOL_NOVERS(strlen);
EXPORT_SYMBOL_NOVERS(strchr);
EXPORT_SYMBOL_NOVERS(strcmp);
EXPORT_SYMBOL_NOVERS(strnlen);
EXPORT_SYMBOL_NOVERS(strrchr);
EXPORT_SYMBOL_NOVERS(strstr);
-EXPORT_SYMBOL_NOVERS(strsep);
EXPORT_SYMBOL_NOVERS(strpbrk);
/*
EXPORT_SYMBOL(console_mode);
EXPORT_SYMBOL(console_device);
EXPORT_SYMBOL_NOVERS(do_call_softirq);
-
/*
- * linux/arch/S390/kernel/semaphore.c
+ * linux/arch/s390x/kernel/semaphore.c
*
* S390 version
* Copyright (C) 1998-2000 IBM Corporation
*
*/
#include <linux/sched.h>
+#include <linux/errno.h>
#include <asm/semaphore.h>
/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
+ * Atomically update sem->count. Equivalent to:
+ * old_val = sem->count.counter;
+ * new_val = ((old_val >= 0) ? old_val : 0) + incr;
+ * sem->count.counter = new_val;
+ * return old_val;
*/
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+ int old_val, new_val;
+
+ __asm__ __volatile__(" l %0,0(%3)\n"
+ "0: ltr %1,%0\n"
+ " jhe 1f\n"
+ " lhi %1,0\n"
+ "1: ar %1,%4\n"
+ " cs %0,%1,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old_val), "=&d" (new_val),
+ "+m" (sem->count)
+ : "a" (&sem->count), "d" (incr) : "cc" );
+ return old_val;
+}
/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
+ * The inline function up() incremented count but the result
+ * was <= 0. This indicates that some process is waiting on
+ * the semaphore. The semaphore is free and we'll wake the
+ * first sleeping process, so we set count to 1 unless some
+ * other cpu has called up in the meantime, in which case
+ * we just increment count by 1.
*/
-
void __up(struct semaphore *sem)
{
+ __sem_update_count(sem, 1);
wake_up(&sem->wait);
}
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
+/*
+ * The inline function down() decremented count and the result
+ * was < 0. The wait loop will atomically test and update the
+ * semaphore counter following the rules:
+ * count > 0: decrement count, wake up queue and exit.
+ * count <= 0: set count to -1, go to sleep.
+ */
void __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
+
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
+ while (__sem_update_count(sem, -1) <= 0) {
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
}
- spin_unlock_irq(&semaphore_lock);
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
+/*
+ * Same as __down() with an additional test for signals.
+ * If a signal is pending the count is updated as follows:
+ * count > 0: wake up queue and exit.
+ * count <= 0: set count to 0, wake up queue and exit.
+ */
int __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
+
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers ++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we* can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
+ while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
+ __sem_update_count(sem, 0);
retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock. The
- * "-1" is because we're still hoping to get
- * the lock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
break;
}
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
schedule();
tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
}
- spin_unlock_irq(&semaphore_lock);
- tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
return retval;
}
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- */
-int __down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- int sleepers;
-
- spin_lock_irqsave(&semaphore_lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers, &sem->count))
- wake_up(&sem->wait);
-
- spin_unlock_irqrestore(&semaphore_lock, flags);
- return 1;
-}
unsigned int console_device = -1;
unsigned long memory_size = 0;
unsigned long machine_flags = 0;
-struct { unsigned long addr, size, type; } memory_chunk[16];
+struct { unsigned long addr, size, type; } memory_chunk[16] = { { 0 } };
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
__u16 boot_cpu_addr;
/*
* Force FPU initialization:
*/
- current->flags &= ~PF_USEDFPU;
+ clear_thread_flag(TIF_USEDFPU);
current->used_math = 0;
/* Setup active_mm for idle_task */
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_HWC_CONSOLE)
- if (strncmp(str, "hwc", 4) == 0 && !MACHINE_IS_P390)
+ if (strncmp(str, "hwc", 4) == 0)
SET_CONSOLE_HWC;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
- if (strncmp(str, "3215", 5) == 0 && (MACHINE_IS_VM || MACHINE_IS_P390))
+ if (strncmp(str, "3215", 5) == 0)
SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
- if (strncmp(str, "3270", 5) == 0 && (MACHINE_IS_VM || MACHINE_IS_P390))
+ if (strncmp(str, "3270", 5) == 0)
SET_CONSOLE_3270;
#endif
return 1;
}
}
+#ifdef CONFIG_SMP
+extern void machine_restart_smp(char *);
+extern void machine_halt_smp(void);
+extern void machine_power_off_smp(void);
+
+void (*_machine_restart)(char *command) = machine_restart_smp;
+void (*_machine_halt)(void) = machine_halt_smp;
+void (*_machine_power_off)(void) = machine_power_off_smp;
+#else
/*
* Reboot, halt and power_off routines for non SMP.
*/
-#ifndef CONFIG_SMP
-void machine_restart(char * __unused)
+static void do_machine_restart_nonsmp(char * __unused)
{
reipl(S390_lowcore.ipl_device);
}
-void machine_halt(void)
+static void do_machine_halt_nonsmp(void)
{
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
-void machine_power_off(void)
+static void do_machine_power_off_nonsmp(void)
{
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
+
+void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
+void (*_machine_halt)(void) = do_machine_halt_nonsmp;
+void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif
+/*
+ * Reboot, halt and power_off stubs. They just call _machine_restart,
+ * _machine_halt or _machine_power_off.
+ */
+
+void machine_restart(char *command)
+{
+ _machine_restart(command);
+}
+
+void machine_halt(void)
+{
+ _machine_halt();
+}
+
+void machine_power_off(void)
+{
+ _machine_power_off();
+}
+
/*
* Setup function called from init/main.c just after the banner
* was printed.
* print what head.S has found out about the machine
*/
printk((MACHINE_IS_VM) ?
- "We are running under VM\n" :
- "We are running native\n");
+ "We are running under VM (64 bit mode)\n" :
+ "We are running native (64 bit mode)\n");
ROOT_DEV = to_kdev_t(0x0100);
memory_start = (unsigned long) &_end; /* fixit if use $CODELO etc*/
lowcore->io_new_psw.mask = _IO_PSW_MASK;
lowcore->io_new_psw.addr = (addr_t) &io_int_handler;
lowcore->ipl_device = S390_lowcore.ipl_device;
- lowcore->kernel_stack = ((__u32) &init_task_union) + 16384;
+ lowcore->kernel_stack = ((__u64) &init_thread_union) + 16384;
lowcore->async_stack = (__u64)
__alloc_bootmem(4*PAGE_SIZE, 4*PAGE_SIZE, 0) + 16384;
+ lowcore->jiffy_timer = -1LL;
set_prefix((__u32)(__u64) lowcore);
cpu_init();
boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct cpuinfo_S390 *cpuinfo;
- unsigned n = v;
+ unsigned long n = (unsigned long) v - 1;
- if (!n--) {
+ if (!n) {
seq_printf(m, "vendor_id : IBM/S390\n"
"# processors : %i\n"
"bogomips per cpu: %lu.%02lu\n",
smp_num_cpus, loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ))%100);
- } else if (cpu_online_map & (1 << n)) {
- cpuinfo = &safe_get_cpu_lowcore(n).cpu_data;
- seq_printf(m, "processor %i: "
+ }
+ if (cpu_online_map & (1 << n)) {
+ cpuinfo = &safe_get_cpu_lowcore(n)->cpu_data;
+ seq_printf(m, "processor %li: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos <= NR_CPUS ? (void)(*pos+1) : NULL;
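+	/* Bias the position by one: seq_file stops iterating when the
+	   iterator returns NULL, so slot 0 needs a non-NULL cookie.
+	   show_cpuinfo subtracts the bias again. */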
+ return *pos <= NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
+#include <linux/tty.h>
#include <linux/personality.h>
+#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
static inline int map_signal(int sig)
{
- if (current->exec_domain
- && current->exec_domain->signal_invmap
+ if (current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
&& sig < 32)
- return current->exec_domain->signal_invmap[sig];
+ return current_thread_info()->exec_domain->signal_invmap[sig];
else
return sig;
}
goto give_sigsegv;
}
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (addr_t *) frame))
+ goto give_sigsegv;
+
/* Set up registers for signal handler */
regs->gprs[15] = (addr_t)frame;
regs->psw.addr = FIX_PSW(ka->sa.sa_handler);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->gprs[15]),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
(u16 *)(frame->retcode));
}
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (addr_t *) frame))
+ goto give_sigsegv;
+
/* Set up registers for signal handler */
regs->gprs[15] = (addr_t)frame;
regs->psw.addr = FIX_PSW(ka->sa.sa_handler);
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
siginfo_t info;
+ int signr;
/*
* We want the common case to go fast, which
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
+#include <linux/tty.h>
#include <linux/personality.h>
+#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include "linux32.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-#define _USER_PSW_MASK32 0x0701C00080000000
+#define _USER_PSW_MASK32 0x0705C00080000000
typedef struct
{
err |= __put_user(from->si_status, &to->si_status);
break;
case __SI_FAULT >> 16:
- err |= __put_user(from->si_addr, &to->si_addr);
+ err |= __put_user((unsigned long) from->si_addr,
+ &to->si_addr);
break;
case __SI_POLL >> 16:
case __SI_TIMER >> 16:
if (uss) {
if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
return -EFAULT;
- err |= __get_user(kss.ss_sp, &uss->ss_sp);
+ err |= __get_user((unsigned long) kss.ss_sp, &uss->ss_sp);
err |= __get_user(kss.ss_size, &uss->ss_size);
err |= __get_user(kss.ss_flags, &uss->ss_flags);
if (err)
if (!ret && uoss) {
if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
return -EFAULT;
- err |= __put_user(koss.ss_sp, &uoss->ss_sp);
+ err |= __put_user((unsigned long) koss.ss_sp, &uoss->ss_sp);
err |= __put_user(koss.ss_size, &uoss->ss_size);
err |= __put_user(koss.ss_flags, &uoss->ss_flags);
if (err)
rt_sigframe32 *frame = (rt_sigframe32 *)regs->gprs[15];
sigset_t set;
stack_t st;
+ __u32 ss_sp;
int err;
mm_segment_t old_fs = get_fs();
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
- err = __get_user(st.ss_sp, &frame->uc.uc_stack.ss_sp);
- st.ss_sp = (void *) A((unsigned long)st.ss_sp);
+ err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
+ st.ss_sp = (void *) A((unsigned long)ss_sp);
err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
if (err)
static inline int map_signal(int sig)
{
- if (current->exec_domain
- && current->exec_domain->signal_invmap
+ if (current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
&& sig < 32)
- return current->exec_domain->signal_invmap[sig];
+ return current_thread_info()->exec_domain->signal_invmap[sig];
else
return sig;
}
if (save_sigregs32(regs, &frame->sregs))
goto give_sigsegv;
- if (__put_user(&frame->sregs, &frame->sc.sregs))
+ if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
goto give_sigsegv;
/* Set up to return from userspace. If provided, use a stub
goto give_sigsegv;
}
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (unsigned int *) frame))
+ goto give_sigsegv;
+
/* Set up registers for signal handler */
regs->gprs[15] = (addr_t)frame;
regs->psw.addr = FIX_PSW(ka->sa.sa_handler);
(u16 *)(frame->retcode));
}
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (unsigned int *) frame))
+ goto give_sigsegv;
+
/* Set up registers for signal handler */
regs->gprs[15] = (addr_t)frame;
regs->psw.addr = FIX_PSW(ka->sa.sa_handler);
static int max_cpus = NR_CPUS; /* Setup configured maximum number of CPUs to activate */
int smp_num_cpus;
struct _lowcore *lowcore_ptr[NR_CPUS];
-unsigned int prof_multiplier[NR_CPUS];
-unsigned int prof_old_multiplier[NR_CPUS];
-unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time=0;
int smp_threads_ready=0; /* Set when the idlers are all forked. */
static atomic_t smp_commenced = ATOMIC_INIT(0);
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-unsigned long cpu_online_map;
+volatile unsigned long phys_cpu_present_map;
+volatile unsigned long cpu_online_map;
+unsigned long cache_decay_ticks = 0;
/*
* Setup routine for controlling SMP activation
return 0;
}
+static inline void do_send_stop(void)
+{
+ u32 dummy;
+ int i, rc;
+
+ /* stop all processors */
+ for (i = 0; i < smp_num_cpus; i++) {
+ if (smp_processor_id() == i)
+ continue;
+ do {
+ rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+ } while (rc == sigp_busy);
+ }
+}
+
+static inline void do_store_status(void)
+{
+ unsigned long low_core_addr;
+ u32 dummy;
+ int i, rc;
+
+ /* store status of all processors in their lowcores (real 0) */
+ for (i = 0; i < smp_num_cpus; i++) {
+ if (smp_processor_id() == i)
+ continue;
+ low_core_addr = (unsigned long)get_cpu_lowcore(i);
+ do {
+ rc = signal_processor_ps(&dummy, low_core_addr, i,
+ sigp_store_status_at_address);
+ } while(rc == sigp_busy);
+ }
+}
+
+/*
+ * this function sends a 'stop' sigp to all other CPUs in the system.
+ * it goes straight through.
+ */
+void smp_send_stop(void)
+{
+ /* write magic number to zero page (absolute 0) */
+ get_cpu_lowcore(smp_processor_id())->panic_magic = __PANIC_MAGIC;
+
+ /* stop other processors. */
+ do_send_stop();
+
+ /* store status of other processors. */
+ do_store_status();
+}
/*
- * Various special callbacks
+ * Reboot, halt and power_off routines for SMP.
*/
+static volatile unsigned long cpu_restart_map;
-void do_machine_restart(void)
+static void do_machine_restart(void * __unused)
{
- smp_send_stop();
- reipl(S390_lowcore.ipl_device);
+ clear_bit(smp_processor_id(), &cpu_restart_map);
+ if (smp_processor_id() == 0) {
+ /* Wait for all other cpus to enter do_machine_restart. */
+ while (cpu_restart_map != 0);
+ /* Store status of other cpus. */
+ do_store_status();
+ /*
+ * Finally call reipl. Because we waited for all other
+ * cpus to enter this function we know that they do
+ * not hold any s390irq-locks (the cpus have been
+ * interrupted by an external interrupt and s390irq
+ * locks are always held disabled).
+ */
+ reipl(S390_lowcore.ipl_device);
+ }
+ signal_processor(smp_processor_id(), sigp_stop);
}
-void machine_restart(char * __unused)
+void machine_restart_smp(char * __unused)
{
- if (smp_processor_id() != 0) {
- smp_ext_bitcall(0, ec_restart);
- for (;;);
- } else
- do_machine_restart();
+ cpu_restart_map = cpu_online_map;
+ smp_call_function(do_machine_restart, NULL, 0, 0);
+ do_machine_restart(NULL);
}
-void do_machine_halt(void)
+static void do_machine_halt(void * __unused)
{
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- cpcmd(vmhalt_cmd, NULL, 0);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+ if (smp_processor_id() == 0) {
+ smp_send_stop();
+ if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+ cpcmd(vmhalt_cmd, NULL, 0);
+ signal_processor(smp_processor_id(),
+ sigp_stop_and_store_status);
+ }
+ for (;;)
+ enabled_wait();
}
-void machine_halt(void)
+void machine_halt_smp(void)
{
- if (smp_processor_id() != 0) {
- smp_ext_bitcall(0, ec_halt);
- for (;;);
- } else
- do_machine_halt();
+ smp_call_function(do_machine_halt, NULL, 0, 0);
+ do_machine_halt(NULL);
}
-void do_machine_power_off(void)
+static void do_machine_power_off(void * __unused)
{
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- cpcmd(vmpoff_cmd, NULL, 0);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+ if (smp_processor_id() == 0) {
+ smp_send_stop();
+ if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+ cpcmd(vmpoff_cmd, NULL, 0);
+ signal_processor(smp_processor_id(),
+ sigp_stop_and_store_status);
+ }
+ for (;;)
+ enabled_wait();
}
-void machine_power_off(void)
+void machine_power_off_smp(void)
{
- if (smp_processor_id() != 0) {
- smp_ext_bitcall(0, ec_power_off);
- for (;;);
- } else
- do_machine_power_off();
+ smp_call_function(do_machine_power_off, NULL, 0, 0);
+ do_machine_power_off(NULL);
}
/*
*
* For the ec_schedule signal we have to do nothing. All the work
* is done automatically when we return from the interrupt.
- * For the ec_restart, ec_halt and ec_power_off we call the
- * appropriate routine.
*/
bits = xchg(&S390_lowcore.ext_call_fast, 0);
- if (test_bit(ec_restart, &bits))
- do_machine_restart();
- if (test_bit(ec_halt, &bits))
- do_machine_halt();
- if (test_bit(ec_power_off, &bits))
- do_machine_power_off();
if (test_bit(ec_call_function, &bits))
do_call_function();
}
/*
* Set signaling bit in lowcore of target cpu and kick it
*/
- set_bit(sig, &(get_cpu_lowcore(cpu).ext_call_fast));
+ set_bit(sig, &(get_cpu_lowcore(cpu)->ext_call_fast));
ccode = signal_processor(cpu, sigp_external_call);
return ccode;
}
*/
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
- sigp_ccode ccode;
int i;
for (i = 0; i < smp_num_cpus; i++) {
/*
* Set signaling bit in lowcore of target cpu and kick it
*/
- set_bit(sig, &(get_cpu_lowcore(i).ext_call_fast));
- ccode = signal_processor(i, sigp_external_call);
- }
-}
-
-/*
- * this function sends a 'stop' sigp to all other CPUs in the system.
- * it goes straight through.
- */
-
-void smp_send_stop(void)
-{
- int i;
- u32 dummy;
- unsigned long low_core_addr;
-
- /* write magic number to zero page (absolute 0) */
-
- get_cpu_lowcore(smp_processor_id()).panic_magic = __PANIC_MAGIC;
-
- /* stop all processors */
-
- for (i = 0; i < smp_num_cpus; i++) {
- if (smp_processor_id() != i) {
- int ccode;
- do {
- ccode = signal_processor_ps(
- &dummy,
- 0,
- i,
- sigp_stop);
- } while(ccode == sigp_busy);
- }
- }
-
- /* store status of all processors in their lowcores (real 0) */
-
- for (i = 0; i < smp_num_cpus; i++) {
- if (smp_processor_id() != i) {
- int ccode;
- low_core_addr = (unsigned long)&get_cpu_lowcore(i);
- do {
- ccode = signal_processor_ps(
- &dummy,
- low_core_addr,
- i,
- sigp_store_status_at_address);
- } while(ccode == sigp_busy);
- }
+ set_bit(sig, &(get_cpu_lowcore(i)->ext_call_fast));
+ while (signal_processor(i, sigp_external_call) == sigp_busy)
+ udelay(10);
}
}
{
int curr_cpu;
- current->processor = 0;
+ current_thread_info()->cpu = 0;
smp_num_cpus = 1;
+ phys_cpu_present_map = 1;
+ cpu_online_map = 1;
for (curr_cpu = 0;
curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) {
if ((__u16) curr_cpu == boot_cpu_addr)
if (signal_processor(smp_num_cpus, sigp_sense) ==
sigp_not_operational)
continue;
+ set_bit(smp_num_cpus, &phys_cpu_present_map);
smp_num_cpus++;
}
printk("Detected %d CPU's\n",(int) smp_num_cpus);
/*
* Activate a secondary processor.
*/
-extern void init_100hz_timer(void);
+extern void init_cpu_timer(void);
extern int pfault_init(void);
int __init start_secondary(void *cpuvoid)
{
/* Setup the cpu */
cpu_init();
+ /* Mark this cpu as online. */
+ set_bit(smp_processor_id(), &cpu_online_map);
/* Print info about this processor */
- print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id()).cpu_data);
+ print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id())->cpu_data);
/* Wait for completion of smp startup */
while (!atomic_read(&smp_commenced))
/* nothing */ ;
- /* init per CPU 100 hz timer */
- init_100hz_timer();
+ /* init per CPU timer */
+ init_cpu_timer();
#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
-	return cpu_idle(NULL);
+	return cpu_idle();
}
-/*
- * The restart interrupt handler jumps to start_secondary directly
- * without the detour over initialize_secondary. We defined it here
- * so that the linker doesn't complain.
- */
-void __init initialize_secondary(void)
-{
-}
-
static struct task_struct * __init fork_by_hand(void)
{
struct pt_regs regs;
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle->processor = cpu;
- idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
+ init_idle(idle, cpu);
- del_from_runqueue(idle);
unhash_process(idle);
- init_tasks[cpu] = idle;
- cpu_lowcore=&get_cpu_lowcore(cpu);
+ cpu_lowcore = get_cpu_lowcore(cpu);
cpu_lowcore->save_area[15] = idle->thread.ksp;
- cpu_lowcore->kernel_stack = (idle->thread.ksp | 16383) + 1;
+ cpu_lowcore->kernel_stack = (__u64) idle->thread_info + 16384;
__asm__ __volatile__("la 1,%0\n\t"
"stctg 0,15,0(1)\n\t"
"la 1,%1\n\t"
eieio();
signal_processor(cpu,sigp_restart);
- /* Mark this cpu as online. */
- set_bit(cpu, &cpu_online_map);
}
/*
void __init smp_boot_cpus(void)
{
- struct _lowcore *curr_lowcore;
unsigned long async_stack;
sigp_ccode ccode;
int i;
/*
* Initialize the logical to physical CPU number mapping
- * and the per-CPU profiling counter/multiplier
*/
-
- for (i = 0; i < NR_CPUS; i++) {
- prof_counter[i] = 1;
- prof_old_multiplier[i] = 1;
- prof_multiplier[i] = 1;
- }
+ print_cpu_info(&safe_get_cpu_lowcore(0)->cpu_data);
- print_cpu_info(&safe_get_cpu_lowcore(0).cpu_data);
-
- for(i = 0; i < smp_num_cpus; i++)
- {
- curr_lowcore = (struct _lowcore *)
+ for(i = 0; i < smp_num_cpus; i++) {
+ lowcore_ptr[i] = (struct _lowcore *)
__get_free_pages(GFP_KERNEL|GFP_DMA, 1);
- if (curr_lowcore == NULL) {
- printk("smp_boot_cpus failed to allocate prefix memory\n");
- break;
- }
async_stack = __get_free_pages(GFP_KERNEL,2);
- if (async_stack == 0) {
- printk("smp_boot_cpus failed to allocate asyncronous"
- " interrupt stack\n");
- free_page((unsigned long) curr_lowcore);
- break;
- }
- lowcore_ptr[i] = curr_lowcore;
- memcpy(curr_lowcore, &S390_lowcore, sizeof(struct _lowcore));
- curr_lowcore->async_stack = async_stack + (4 * PAGE_SIZE);
+ if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
+ panic("smp_boot_cpus failed to allocate memory\n");
+ memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
+ lowcore_ptr[i]->async_stack = async_stack + (4 * PAGE_SIZE);
/*
* Most of the parameters are set up when the cpu is
* started up.
*/
- if (smp_processor_id() == i)
- set_prefix((u32)(u64)curr_lowcore);
- else {
- ccode = signal_processor_p((u64)(curr_lowcore),
- i, sigp_set_prefix);
- if(ccode) {
- /* if this gets troublesome I'll have to do
- * something about it. */
- printk("ccode %d for cpu %d returned when "
- "setting prefix in smp_boot_cpus not good.\n",
- (int) ccode, (int) i);
- }
- else
- do_boot_cpu(i);
- }
+ if (smp_processor_id() == i) {
+ set_prefix((u32)(u64) lowcore_ptr[i]);
+ continue;
+ }
+ ccode = signal_processor_p((u64) lowcore_ptr[i],
+ i, sigp_set_prefix);
+ if(ccode)
+ panic("sigp_set_prefix failed for cpu %d "
+ "with condition code %d\n",
+ (int) i, (int) ccode);
+ do_boot_cpu(i);
}
+ /*
+ * Now wait until all of the cpus are online.
+ */
+ while (phys_cpu_present_map != cpu_online_map);
}
/*
return 0;
}
-/*
- * Local timer interrupt handler. It does both profiling and
- * process statistics/rescheduling.
- *
- * We do profiling in every local tick, statistics/rescheduling
- * happen only every 'profiling multiplier' ticks. The default
- * multiplier is 1 and it can be changed by writing the new multiplier
- * value into /proc/profile.
- */
-
-void smp_local_timer_interrupt(struct pt_regs * regs)
-{
- int user = (user_mode(regs) != 0);
- int cpu = smp_processor_id();
-
- /*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
- if (!user_mode(regs))
- s390_do_profile(regs->psw.addr);
-
- if (!--prof_counter[cpu]) {
-
- /*
- * The multiplier may have changed since the last time we got
- * to this point as a result of the user writing to
- * /proc/profile. In this case we need to adjust the APIC
- * timer accordingly.
- *
- * Interrupts are already masked off at this point.
- */
- prof_counter[cpu] = prof_multiplier[cpu];
- if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
- prof_old_multiplier[cpu] = prof_counter[cpu];
- }
-
- /*
- * After doing the above, we need to make like
- * a normal interrupt - otherwise timer interrupts
- * ignore the global interrupt lock, which is the
- * WrongThing (tm) to do.
- */
-
- update_process_times(user);
- }
-}
-
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(smp_call_function);
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
+#include <linux/personality.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
return -ERESTARTNOHAND;
}
+extern asmlinkage int sys_newuname(struct new_utsname * name);
+
+asmlinkage int s390x_newuname(struct new_utsname * name)
+{
+ int ret = sys_newuname(name);
+
+ if (current->personality == PER_LINUX32 && !ret) {
+ ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
+ if (ret) ret = -EFAULT;
+ }
+ return ret;
+}
+
+extern asmlinkage long sys_personality(unsigned long);
+
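+/*
+ * A PER_LINUX32 task asking for PER_LINUX keeps PER_LINUX32, so
+ * uname keeps reporting "s390" (see s390x_newuname above); the
+ * return value is translated so the caller sees what it expects.
+ */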
+asmlinkage int s390x_personality(unsigned long personality)
+{
+ int ret;
+
+ if (current->personality == PER_LINUX32 && personality == PER_LINUX)
+ personality = PER_LINUX32;
+ ret = sys_personality(personality);
+ if (ret == PER_LINUX32)
+ ret = PER_LINUX;
+
+ return ret;
+}
#include <asm/irq.h>
#include <asm/s390_ext.h>
-
/* change this if you have some constant time drift */
-#define USECS_PER_JIFFY ((signed long)1000000/HZ)
-#define CLK_TICKS_PER_JIFFY ((signed long)USECS_PER_JIFFY<<12)
+#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
+#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
#define TICK_SIZE tick
-static uint64_t init_timer_cc, last_timer_cc;
+static ext_int_info_t ext_int_info_timer;
+static uint64_t init_timer_cc;
extern rwlock_t xtime_lock;
extern unsigned long wall_jiffies;
-void tod_to_timeval(uint64_t todval, struct timeval *xtime)
+void tod_to_timeval(__u64 todval, struct timeval *xtime)
{
-#if 0
- const int high_bit = 0x80000000L;
- const int c_f4240 = 0xf4240L;
- const int c_7a120 = 0x7a120;
- /* We have to divide the 64 bit value todval by 4096
- * (because the 2^12 bit is the one that changes every
- * microsecond) and then split it into seconds and
- * microseconds. A value of max (2^52-1) divided by
- * the value 0xF4240 can yield a max result of approx
- * (2^32.068). Thats to big to fit into a signed int
- * ... hacking time!
- */
- asm volatile ("L 2,%1\n\t"
- "LR 3,2\n\t"
- "SRL 2,12\n\t"
- "SLL 3,20\n\t"
- "L 4,%O1+4(%R1)\n\t"
- "SRL 4,12\n\t"
- "OR 3,4\n\t" /* now R2/R3 contain (todval >> 12) */
- "SR 4,4\n\t"
- "CL 2,%2\n\t"
- "JL .+12\n\t"
- "S 2,%2\n\t"
- "L 4,%3\n\t"
- "D 2,%4\n\t"
- "OR 3,4\n\t"
- "ST 2,%O0+4(%R0)\n\t"
- "ST 3,%0"
- : "=m" (*xtime) : "m" (todval),
- "m" (c_7a120), "m" (high_bit), "m" (c_f4240)
- : "cc", "memory", "2", "3", "4" );
-#else
- todval >>= 12;
- xtime->tv_sec = todval / 1000000;
- xtime->tv_usec = todval % 1000000;
-#endif
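+	/* bit 51 of the TOD clock ticks once per microsecond, so
+	 * todval >> 12 is microseconds since 1900-01-01 */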
+ todval >>= 12;
+ xtime->tv_sec = todval / 1000000;
+ xtime->tv_usec = todval % 1000000;
}
-unsigned long do_gettimeoffset(void)
+static inline unsigned long do_gettimeoffset(void)
{
- __u64 timer_cc;
+ __u64 now;
- asm volatile ("STCK %0" : "=m" (timer_cc));
- /* We require the offset from the previous interrupt */
- return ((unsigned long)((timer_cc - last_timer_cc)>>12));
+ asm ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
+ now = (now - init_timer_cc) >> 12;
+ /* We require the offset from the latest update of xtime */
+ now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+ return (unsigned long) now;
}
/*
{
unsigned long flags;
unsigned long usec, sec;
- unsigned long lost_ticks;
read_lock_irqsave(&xtime_lock, flags);
- lost_ticks = jiffies - wall_jiffies;
- usec = do_gettimeoffset();
- if (lost_ticks)
- usec +=(USECS_PER_JIFFY*lost_ticks);
sec = xtime.tv_sec;
- usec += xtime.tv_usec;
+ usec = xtime.tv_usec + do_gettimeoffset();
read_unlock_irqrestore(&xtime_lock, flags);
while (usec >= 1000000) {
extern __u16 boot_cpu_addr;
#endif
-void do_timer_interrupt(struct pt_regs *regs, __u16 error_code)
+static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
{
int cpu = smp_processor_id();
irq_enter(cpu, 0);
- /*
- * reset timer to 10ms minus time already elapsed
- * since timer-interrupt pending
- */
+ /*
+ * set clock comparator for next tick
+ */
+ S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
+ asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
+
#ifdef CONFIG_SMP
- if(S390_lowcore.cpu_data.cpu_addr==boot_cpu_addr) {
+ if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr)
write_lock(&xtime_lock);
- last_timer_cc = S390_lowcore.jiffy_timer_cc;
- }
-#else
- last_timer_cc = S390_lowcore.jiffy_timer_cc;
-#endif
- /* set clock comparator */
- S390_lowcore.jiffy_timer_cc += CLK_TICKS_PER_JIFFY;
- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer_cc));
-/*
- * In the SMP case we use the local timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifdef CONFIG_SMP
- /* when SMP, do smp_local_timer_interrupt for *all* CPUs,
- but only do the rest for the boot CPU */
- smp_local_timer_interrupt(regs);
-#else
- if (!user_mode(regs))
- s390_do_profile(regs->psw.addr);
-#endif
+ update_process_times(user_mode(regs));
-#ifdef CONFIG_SMP
- if(S390_lowcore.cpu_data.cpu_addr==boot_cpu_addr)
-#endif
- {
+ if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr) {
do_timer(regs);
-#ifdef CONFIG_SMP
write_unlock(&xtime_lock);
-#endif
}
+#else
+ do_timer(regs);
+#endif
irq_exit(cpu, 0);
}
/*
* Start the clock comparator on the current CPU
*/
-static unsigned long cr0 __attribute__ ((aligned (8)));
-
-void init_100hz_timer(void)
+void init_cpu_timer(void)
{
+ unsigned long cr0;
+
/* allow clock comparator timer interrupt */
asm volatile ("STCTG 0,0,%0" : "=m" (cr0) : : "memory");
cr0 |= 0x800;
asm volatile ("LCTLG 0,0,%0" : : "m" (cr0) : "memory");
- /* set clock comparator */
- /* read the TOD clock */
- asm volatile ("STCK %0" : "=m" (S390_lowcore.jiffy_timer_cc));
- S390_lowcore.jiffy_timer_cc += CLK_TICKS_PER_JIFFY;
- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer_cc));
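+	/*
+	 * Next tick: TOD value at boot plus the ticks elapsed
+	 * since then, plus one tick.
+	 */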
+ S390_lowcore.jiffy_timer = (__u64) jiffies * CLK_TICKS_PER_JIFFY;
+ S390_lowcore.jiffy_timer += init_timer_cc + CLK_TICKS_PER_JIFFY;
+ asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
}
/*
*/
void __init time_init(void)
{
+ __u64 set_time_cc;
int cc;
/* kick the TOD clock */
- asm volatile ("STCK %1\n\t"
+ asm volatile ("STCK 0(%1)\n\t"
"IPM %0\n\t"
- "SRL %0,28" : "=r" (cc), "=m" (init_timer_cc));
+ "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
+ : "memory", "cc");
switch (cc) {
case 0: /* clock in set state: all is fine */
break;
printk("time_init: TOD clock stopped/non-operational\n");
break;
}
+
+ /* set xtime */
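+	/*
+	 * 0x8126d60e46000000 is the TOD value of 1972-01-01 and
+	 * 0x3c26700 (= 63072000) is the number of seconds in the two
+	 * leap-year-free years 1970 and 1971; together they rebase the
+	 * TOD clock (epoch 1900-01-01) to the Unix epoch.
+	 */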
+ set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
+ (0x3c26700LL*1000000*4096);
+ tod_to_timeval(set_time_cc, &xtime);
+
/* request the 0x1004 external interrupt */
- if (register_external_interrupt(0x1004, do_timer_interrupt) != 0)
- panic("Couldn't request external interrupts 0x1004");
- init_100hz_timer();
- init_timer_cc = S390_lowcore.jiffy_timer_cc;
- init_timer_cc -= 0x8126d60e46000000LL -
- (0x3c26700LL*1000000*4096);
- tod_to_timeval(init_timer_cc, &xtime);
+ if (register_early_external_interrupt(0x1004, do_comparator_interrupt,
+ &ext_int_info_timer) != 0)
+ panic("Couldn't request external interrupt 0x1004");
+
+ /* init CPU timer */
+ init_cpu_timer();
}
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#endif
#endif
-extern pgm_check_handler_t do_page_fault;
+extern pgm_check_handler_t do_protection_exception;
+extern pgm_check_handler_t do_segment_exception;
+extern pgm_check_handler_t do_region_exception;
+extern pgm_check_handler_t do_page_exception;
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
+static ext_int_info_t ext_int_pfault;
#endif
+int kstack_depth_to_print = 20;
+
+/*
+ * If the address is either in the .text section of the
+ * kernel, or in the vmalloc'ed module regions, it *may*
+ * be the address of a calling routine
+ */
+extern char _stext, _etext;
+
+#ifdef CONFIG_MODULES
+
+extern struct module *module_list;
+extern struct module kernel_module;
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ int retval = 0;
+ struct module *mod;
+
+ if (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext)
+ return 1;
+
+ for (mod = module_list; mod != &kernel_module; mod = mod->next) {
+ /* mod_bound tests for addr being inside the vmalloc'ed
+ * module area. Of course it'd be better to test only
+ * for the .text subset... */
+ if (mod_bound(addr, 0, mod)) {
+ retval = 1;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+#else
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ return (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext);
+}
+
+#endif
+
+void show_trace(unsigned long * stack)
+{
+ unsigned long backchain, low_addr, high_addr, ret_addr;
+ int i;
+
+ if (!stack)
+ stack = (unsigned long*)&stack;
+
+ printk("Call Trace: ");
+ low_addr = ((unsigned long) stack) & PSW_ADDR_MASK;
+ high_addr = (low_addr & (-THREAD_SIZE)) + THREAD_SIZE;
+ /* Skip the first frame (biased stack) */
+ backchain = *((unsigned long *) low_addr) & PSW_ADDR_MASK;
+	/* Print up to 8 entries */
+ for (i = 0; i < 8; i++) {
+ if (backchain < low_addr || backchain >= high_addr)
+ break;
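+		/* the saved return address (r14) sits at offset 112
+		 * of the 160-byte s390x stack frame */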
+ ret_addr = *((unsigned long *) (backchain+112)) & PSW_ADDR_MASK;
+ if (!kernel_text_address(ret_addr))
+ break;
+ if (i && ((i % 3) == 0))
+ printk("\n ");
+ printk("[<%016lx>] ", ret_addr);
+ low_addr = backchain;
+ backchain = *((unsigned long *) backchain) & PSW_ADDR_MASK;
+ }
+ printk("\n");
+}
+
+void show_trace_task(struct task_struct *tsk)
+{
+ /*
+ * We can't print the backtrace of a running process. It is
+ * unreliable at best and can cause kernel oopses.
+ */
+ if (tsk->state == TASK_RUNNING)
+ return;
+ show_trace((unsigned long *) tsk->thread.ksp);
+}
+
+void show_stack(unsigned long *sp)
+{
+ unsigned long *stack;
+ int i;
+
+	/*
+	 * debugging aid: "show_stack(NULL);" prints the
+	 * back trace for this cpu.
+	 */
+
+ if (sp == NULL)
+ sp = (unsigned long*) &sp;
+
+ stack = sp;
+ for (i = 0; i < kstack_depth_to_print; i++) {
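+		/* stop at the top of the THREAD_SIZE aligned kernel stack */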
+ if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if (i && ((i % 4) == 0))
+ printk("\n ");
+ printk("%016lx ", *stack++);
+ }
+ printk("\n");
+ show_trace(sp);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ mm_segment_t old_fs;
+ char *mode;
+ int i;
+
+ mode = (regs->psw.mask & PSW_PROBLEM_STATE) ? "User" : "Krnl";
+ printk("%s PSW : %016lx %016lx\n",
+ mode, (unsigned long) regs->psw.mask,
+ (unsigned long) regs->psw.addr);
+ printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
+ regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+ printk(" %016lx %016lx %016lx %016lx\n",
+ regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+ printk(" %016lx %016lx %016lx %016lx\n",
+ regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+ printk(" %016lx %016lx %016lx %016lx\n",
+ regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+ printk("%s ACRS: %08x %08x %08x %08x\n", mode,
+ regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
+ printk(" %08x %08x %08x %08x\n",
+ regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
+
+ /*
+	 * Print the first 20 bytes of the instruction stream at the
+ * time of the fault.
+ */
+ old_fs = get_fs();
+ if (regs->psw.mask & PSW_PROBLEM_STATE)
+ set_fs(USER_DS);
+ else
+ set_fs(KERNEL_DS);
+ printk("%s Code: ", mode);
+ for (i = 0; i < 20; i++) {
+ unsigned char c;
+ if (__get_user(c, (char *)(regs->psw.addr + i))) {
+ printk(" Bad PSW.");
+ break;
+ }
+ printk("%02x ", c);
+ }
+ set_fs(old_fs);
+
+ printk("\n");
+}
+
+/* This is called from fs/proc/array.c */
+char *task_show_regs(struct task_struct *task, char *buf)
+{
+ struct pt_regs *regs;
+
+ regs = __KSTK_PTREGS(task);
+ buf += sprintf(buf, "task: %016lx, ksp: %016lx\n",
+ (unsigned long) task, task->thread.ksp);
+ buf += sprintf(buf, "User PSW : %016lx %016lx\n",
+ (unsigned long) regs->psw.mask,
+ (unsigned long) regs->psw.addr);
+ buf += sprintf(buf, "User GPRS: %016lx %016lx %016lx %016lx\n",
+ regs->gprs[0], regs->gprs[1],
+ regs->gprs[2], regs->gprs[3]);
+ buf += sprintf(buf, " %016lx %016lx %016lx %016lx\n",
+ regs->gprs[4], regs->gprs[5],
+ regs->gprs[6], regs->gprs[7]);
+ buf += sprintf(buf, " %016lx %016lx %016lx %016lx\n",
+ regs->gprs[8], regs->gprs[9],
+ regs->gprs[10], regs->gprs[11]);
+ buf += sprintf(buf, " %016lx %016lx %016lx %016lx\n",
+ regs->gprs[12], regs->gprs[13],
+ regs->gprs[14], regs->gprs[15]);
+ buf += sprintf(buf, "User ACRS: %08x %08x %08x %08x\n",
+ regs->acrs[0], regs->acrs[1],
+ regs->acrs[2], regs->acrs[3]);
+ buf += sprintf(buf, " %08x %08x %08x %08x\n",
+ regs->acrs[4], regs->acrs[5],
+ regs->acrs[6], regs->acrs[7]);
+ buf += sprintf(buf, " %08x %08x %08x %08x\n",
+ regs->acrs[8], regs->acrs[9],
+ regs->acrs[10], regs->acrs[11]);
+ buf += sprintf(buf, " %08x %08x %08x %08x\n",
+ regs->acrs[12], regs->acrs[13],
+ regs->acrs[14], regs->acrs[15]);
+ return buf;
+}
+
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
-#define DO_ERROR(signr, str, name) \
-asmlinkage void name(struct pt_regs * regs, long interruption_code) \
-{ \
- do_trap(interruption_code, signr, str, regs, NULL); \
-}
-
-#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
-asmlinkage void name(struct pt_regs * regs, long interruption_code) \
-{ \
- siginfo_t info; \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = (void *)siaddr; \
- do_trap(interruption_code, signr, str, regs, &info); \
-}
-
static void inline do_trap(long interruption_code, int signr, char *str,
struct pt_regs *regs, siginfo_t *info)
{
if (regs->psw.mask & PSW_PROBLEM_STATE) {
struct task_struct *tsk = current;
- tsk->thread.trap_no = interruption_code;
+ tsk->thread.trap_no = interruption_code & 0xffff;
if (info)
force_sig_info(signr, info, tsk);
else
}
}
+static inline void *get_check_address(struct pt_regs *regs)
+{
+ return (void *) ADDR_BITS_REMOVE(regs->psw.addr-S390_lowcore.pgm_ilc);
+}
+
int do_debugger_trap(struct pt_regs *regs,int signal)
{
if(regs->psw.mask&PSW_PROBLEM_STATE)
#if CONFIG_REMOTE_DEBUG
if(gdb_stub_initialised)
{
- gdb_stub_handle_exception((gdb_pt_regs *)regs,signal);
+ gdb_stub_handle_exception(regs, signal);
return 0;
}
#endif
return 0;
}
+#define DO_ERROR(signr, str, name) \
+asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+{ \
+ do_trap(interruption_code, signr, str, regs, NULL); \
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
+asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void *)siaddr; \
+ do_trap(interruption_code, signr, str, regs, &info); \
+}
+
DO_ERROR(SIGSEGV, "Unknown program exception", default_trap_handler)
-DO_ERROR(SIGILL, "privileged operation", privileged_op)
-DO_ERROR(SIGILL, "execute exception", execute_exception)
-DO_ERROR(SIGSEGV, "addressing exception", addressing_exception)
-DO_ERROR(SIGFPE, "fixpoint divide exception", divide_exception)
-DO_ERROR(SIGILL, "translation exception", translation_exception)
-DO_ERROR(SIGILL, "special operand exception", special_op_exception)
-DO_ERROR(SIGILL, "operand exception", operand_exception)
-DO_ERROR(SIGILL, "specification exception", specification_exception);
+
+DO_ERROR_INFO(SIGBUS, "addressing exception", addressing_exception,
+ BUS_ADRERR, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
+ FPE_INTDIV, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
+ ILL_PRVOPC, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
+ ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
+ ILL_ILLOPN, get_check_address(regs))
+
+static inline void
+do_fp_trap(struct pt_regs *regs, void *location,
+ int fpc, long interruption_code)
+{
+ siginfo_t si;
+
+ si.si_signo = SIGFPE;
+ si.si_errno = 0;
+ si.si_addr = location;
+ si.si_code = 0;
+ /* FPC[2] is Data Exception Code */
+ if ((fpc & 0x00000300) == 0) {
+ /* bits 6 and 7 of DXC are 0 iff IEEE exception */
+ if (fpc & 0x8000) /* invalid fp operation */
+ si.si_code = FPE_FLTINV;
+ else if (fpc & 0x4000) /* div by 0 */
+ si.si_code = FPE_FLTDIV;
+ else if (fpc & 0x2000) /* overflow */
+ si.si_code = FPE_FLTOVF;
+ else if (fpc & 0x1000) /* underflow */
+ si.si_code = FPE_FLTUND;
+ else if (fpc & 0x0800) /* inexact */
+ si.si_code = FPE_FLTRES;
+ }
+ current->thread.ieee_instruction_pointer = (addr_t) location;
+ do_trap(interruption_code, SIGFPE,
+ "floating point exception", regs, &si);
+}
asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
{
__u16 *location;
int do_sig = 0;
- location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
+ location = (__u16 *) get_check_address(regs);
/*
* We got all needed information from the lowcore and can
else
do_sig = 1;
if (do_sig)
- do_trap(interruption_code, SIGILL, "illegal operation", regs, NULL);
+ do_trap(interruption_code, SIGILL,
+ "illegal operation", regs, NULL);
}
asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
{
__u16 *location;
- int do_sig = 0;
- location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
+ location = (__u16 *) get_check_address(regs);
/*
* We got all needed information from the lowcore and can
__asm__ volatile ("stfpc %0\n\t"
: "=m" (current->thread.fp_regs.fpc));
- /* Same code should work when we implement fpu emulation */
- /* provided we call data exception from the fpu emulator */
- if(current->thread.fp_regs.fpc&FPC_DXC_MASK)
- {
- current->thread.ieee_instruction_pointer=(addr_t)location;
- force_sig(SIGFPE, current);
+
+ if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
+ do_fp_trap(regs, location,
+ current->thread.fp_regs.fpc, interruption_code);
+ else {
+ siginfo_t info;
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPN;
+ info.si_addr = location;
+ do_trap(interruption_code, SIGILL,
+ "data exception", regs, &info);
}
- else
- do_sig = 1;
- if (do_sig)
- do_trap(interruption_code, SIGILL, "data exception", regs, NULL);
}
pgm_check_table[1] = &illegal_op;
pgm_check_table[2] = &privileged_op;
pgm_check_table[3] = &execute_exception;
+ pgm_check_table[4] = &do_protection_exception;
pgm_check_table[5] = &addressing_exception;
pgm_check_table[6] = &specification_exception;
pgm_check_table[7] = &data_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
pgm_check_table[0x15] = &operand_exception;
- pgm_check_table[4] = &do_page_fault;
- pgm_check_table[0x10] = &do_page_fault;
- pgm_check_table[0x11] = &do_page_fault;
+ pgm_check_table[0x10] = &do_segment_exception;
+ pgm_check_table[0x11] = &do_page_exception;
pgm_check_table[0x1C] = &privileged_op;
pgm_check_table[0x38] = &addressing_exception;
- pgm_check_table[0x3B] = &do_page_fault;
+ pgm_check_table[0x3B] = &do_region_exception;
#ifdef CONFIG_PFAULT
if (MACHINE_IS_VM) {
/* request the 0x2603 external interrupt */
- if (register_external_interrupt(0x2603, pfault_interrupt) != 0)
+ if (register_early_external_interrupt(0x2603, pfault_interrupt,
+ &ext_int_pfault) != 0)
panic("Couldn't request external interrupt 0x2603");
/*
* Try to get pfault pseudo page faults going.
*/
if (pfault_init() != 0) {
/* Tough luck, no pfault. */
- unregister_external_interrupt(0x2603,
- pfault_interrupt);
+ unregister_early_external_interrupt(0x2603,
+ pfault_interrupt,
+ &ext_int_pfault);
}
}
#endif
lgfr %r2,%r2 # long
lgfr %r3,%r3 # long
llgtr %r4,%r4 # long
- lgfr %r5,%r5 # long
- jg sys32_ptrace # branch to system call
+ llgfr %r5,%r5 # long
+ jg sys_ptrace # branch to system call
.globl sys32_alarm_wrapper
sys32_alarm_wrapper:
llgtr %r3,%r3 # struct rlimit_emu31 *
jg sys32_old_getrlimit # branch to system call
+ .globl sys32_getrlimit_wrapper
+sys32_getrlimit_wrapper:
+ llgfr %r2,%r2 # unsigned int
+ llgtr %r3,%r3 # struct rlimit_emu31 *
+ jg sys32_getrlimit # branch to system call
+
.globl sys32_mmap2_wrapper
sys32_mmap2_wrapper:
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
.globl sys32_newuname_wrapper
sys32_newuname_wrapper:
llgtr %r2,%r2 # struct new_utsname *
- jg sys_newuname # branch to system call
+ jg s390x_newuname # branch to system call
.globl sys32_adjtimex_wrapper
sys32_adjtimex_wrapper:
.globl sys32_personality_wrapper
sys32_personality_wrapper:
llgfr %r2,%r2 # unsigned long
- jg sys_personality # branch to system call
+ jg s390x_personality # branch to system call
.globl sys32_setfsuid16_wrapper
sys32_setfsuid16_wrapper:
llgfr %r4,%r4 # long
jg sys32_lstat64 # branch to system call
+ .globl sys32_stime_wrapper
+sys32_stime_wrapper:
+ llgtr %r2,%r2 # int *
+ jg sys_stime # branch to system call
+
+ .globl sys32_sysctl_wrapper
+sys32_sysctl_wrapper:
+ llgtr %r2,%r2 # struct __sysctl_args32 *
+ jg sys32_sysctl
+
.globl sys32_fstat64_wrapper
sys32_fstat64_wrapper:
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # struct stat64 *
llgfr %r4,%r4 # long
jg sys32_fstat64 # branch to system call
+
+ .globl sys32_futex_wrapper
+sys32_futex_wrapper:
+ llgtr %r2,%r2 # void *
+ lgfr %r3,%r3 # int
+ jg sys_futex # branch to system call
+
+ .globl sys32_setxattr_wrapper
+sys32_setxattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ llgtr %r4,%r4 # void *
+ llgfr %r5,%r5 # size_t
+ lgfr %r6,%r6 # int
+ jg sys_setxattr
+
+ .globl sys32_lsetxattr_wrapper
+sys32_lsetxattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ llgtr %r4,%r4 # void *
+ llgfr %r5,%r5 # size_t
+ lgfr %r6,%r6 # int
+ jg sys_lsetxattr
+
+ .globl sys32_fsetxattr_wrapper
+sys32_fsetxattr_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # char *
+ llgtr %r4,%r4 # void *
+ llgfr %r5,%r5 # size_t
+ lgfr %r6,%r6 # int
+ jg sys_fsetxattr
+
+ .globl sys32_getxattr_wrapper
+sys32_getxattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ llgtr %r4,%r4 # void *
+ llgfr %r5,%r5 # size_t
+ jg sys_getxattr
+
+ .globl sys32_lgetxattr_wrapper
+sys32_lgetxattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ llgtr %r4,%r4 # void *
+ llgfr %r5,%r5 # size_t
+ jg sys_lgetxattr
+
+ .globl sys32_fgetxattr_wrapper
+sys32_fgetxattr_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # char *
+ llgtr %r4,%r4 # void *
+ llgfr %r5,%r5 # size_t
+ jg sys_fgetxattr
+
+ .globl sys32_listxattr_wrapper
+sys32_listxattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ llgfr %r4,%r4 # size_t
+ jg sys_listxattr
+
+ .globl sys32_llistxattr_wrapper
+sys32_llistxattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ llgfr %r4,%r4 # size_t
+ jg sys_llistxattr
+
+ .globl sys32_flistxattr_wrapper
+sys32_flistxattr_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # char *
+ llgfr %r4,%r4 # size_t
+ jg sys_flistxattr
+
+ .globl sys32_removexattr_wrapper
+sys32_removexattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ jg sys_removexattr
+
+ .globl sys32_lremovexattr_wrapper
+sys32_lremovexattr_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # char *
+ jg sys_lremovexattr
+
+ .globl sys32_fremovexattr_wrapper
+sys32_fremovexattr_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # char *
+ jg sys_fremovexattr
+
+ .globl sys32_sched_setaffinity_wrapper
+sys32_sched_setaffinity_wrapper:
+ lgfr %r2,%r2 # int
+ llgfr %r3,%r3 # unsigned int
+ llgtr %r4,%r4 # unsigned long *
+ jg sys32_sched_setaffinity
+
+ .globl sys32_sched_getaffinity_wrapper
+sys32_sched_getaffinity_wrapper:
+ lgfr %r2,%r2 # int
+ llgfr %r3,%r3 # unsigned int
+ llgtr %r4,%r4 # unsigned long *
+ jg sys32_sched_getaffinity
+
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
- * These functions have a non-standard call interface
+ *  These functions have a standard call interface
*/
#include <asm/lowcore.h>
.text
.align 4
- .globl __copy_from_user_fixup
-__copy_from_user_fixup:
- lg 1,__LC_PGM_OLD_PSW+8
-0: lghi 3,-4096
- ng 3,__LC_TRANS_EXC_ADDR
- sgr 3,4
- bm 4(1)
-1: mvcle 2,4,0
- b 4(1)
+ .globl __copy_from_user_asm
+__copy_from_user_asm:
+ lgr %r5,%r3
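+	# sacf 512 switches to access-register mode; the access
+	# registers select the kernel or user address space for
+	# each mvcle operand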
+ sacf 512
+0: mvcle %r2,%r4,0
+ jo 0b
+1: sacf 0
+ lgr %r2,%r5
+ br %r14
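+	# fault fixup: compute how many bytes can still be copied
+	# before the faulting page and resume, or give up (jm 1b)
+	# if nothing is left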
+2: lghi %r3,-4096
+ ng %r3,__LC_TRANS_EXC_ADDR
+ sgr %r3,%r4
+ jm 1b
+ j 0b
.section __ex_table,"a"
- .align 8
- .quad 1b,0b
+ .align 8
+ .quad 0b,2b
.previous
.align 4
.text
- .globl __copy_to_user_fixup
-__copy_to_user_fixup:
- lg 1,__LC_PGM_OLD_PSW+8
-0: lghi 5,-4096
- ng 5,__LC_TRANS_EXC_ADDR
- sgr 5,4
- bm 4(1)
-1: mvcle 4,2,0
- b 4(1)
+ .globl __copy_to_user_asm
+__copy_to_user_asm:
+ lgr %r5,%r3
+ sacf 512
+0: mvcle %r4,%r2,0
+ jo 0b
+1: sacf 0
+ lgr %r2,%r3
+ br %r14
+2: lghi %r5,-4096
+ ng %r5,__LC_TRANS_EXC_ADDR
+ sgr %r5,%r4
+ jm 1b
+ j 0b
.section __ex_table,"a"
- .align 8
- .quad 1b,0b
+ .align 8
+ .quad 0b,2b
.previous
+ .align 4
+ .text
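+	# zero a user buffer: mvcle with a zero-length source operand
+	# fills the destination with the pad byte (0)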
+ .globl __clear_user_asm
+__clear_user_asm:
+ lgr %r4,%r2
+ lgr %r5,%r3
+ sgr %r2,%r2
+ sgr %r3,%r3
+ sacf 512
+0: mvcle %r4,%r2,0
+ jo 0b
+1: sacf 0
+ lgr %r2,%r5
+ br %r14
+2: lghi %r5,-4096
+ ng %r5,__LC_TRANS_EXC_ADDR
+ sgr %r5,%r4
+ jm 1b
+ j 0b
+ .section __ex_table,"a"
+ .align 8
+ .quad 0b,2b
+ .previous
search_exception_table(unsigned long addr)
{
unsigned long ret = 0;
- unsigned long flags;
#ifndef CONFIG_MODULES
/* There is only the kernel to search. */
ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
return ret;
#else
+ unsigned long flags;
/* The kernel is the last "module" -- no need to treat it special. */
struct module *mp;
#endif
extern void die(const char *,struct pt_regs *,long);
-static void force_sigsegv(struct task_struct *tsk, int code, void *address);
extern spinlock_t timerlist_lock;
}
}
+/*
+ * Check which address space is addressed by the access
+ * register in S390_lowcore.exc_access_id.
+ * Returns 1 for user space and 0 for kernel space.
+ */
+static int __check_access_register(struct pt_regs *regs, int error_code)
+{
+ int areg = S390_lowcore.exc_access_id;
+
+ if (areg == 0)
+ /* Access via access register 0 -> kernel address */
+ return 0;
+ if (regs && areg < NUM_ACRS && regs->acrs[areg] <= 1)
+ /*
+ * access register contains 0 -> kernel address,
+ * access register contains 1 -> user space address
+ */
+ return regs->acrs[areg];
+
+ /* Something unhealthy was done with the access registers... */
+ die("page fault via unknown access register", regs, error_code);
+ do_exit(SIGKILL);
+ return 0;
+}
+
+/*
+ * Check which address space the address belongs to.
+ * Returns 1 for user space and 0 for kernel space.
+ */
+static inline int check_user_space(struct pt_regs *regs, int error_code)
+{
+ /*
+ * The lowest two bits of S390_lowcore.trans_exc_code indicate
+ * which paging table was used:
+ * 0: Primary Segment Table Descriptor
+ * 1: STD determined via access register
+ * 2: Secondary Segment Table Descriptor
+ * 3: Home Segment Table Descriptor
+ */
+ int descriptor = S390_lowcore.trans_exc_code & 3;
+ if (descriptor == 1)
+ return __check_access_register(regs, error_code);
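+	/* descriptor >> 1 maps primary (0) to kernel space and
+	 * secondary (2) / home (3) to user space */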
+ return descriptor >> 1;
+}
+
+/*
+ * Send SIGSEGV to task. This is an external routine
+ * to keep the stack usage of do_page_fault small.
+ */
+static void force_sigsegv(struct pt_regs *regs, unsigned long error_code,
+ int si_code, unsigned long address)
+{
+ struct siginfo si;
+
+#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
+#if defined(CONFIG_SYSCTL)
+ if (sysctl_userprocess_debug)
+#endif
+ {
+ printk("User process fault: interruption code 0x%lX\n",
+ error_code);
+ printk("failing address: %lX\n", address);
+ show_regs(regs);
+ }
+#endif
+ si.si_signo = SIGSEGV;
+ si.si_code = si_code;
+ si.si_addr = (void *) address;
+ force_sig_info(SIGSEGV, &si, current);
+}
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* error_code:
- * ****0004 Protection -> Write-Protection (suprression)
- * ****0010 Segment translation -> Not present (nullification)
- * ****0011 Page translation -> Not present (nullification)
- * ****003B Region third exception -> Not present (nullification)
+ *   04       Protection           ->  Write-Protection  (suppression)
+ * 10 Segment translation -> Not present (nullification)
+ * 11 Page translation -> Not present (nullification)
+ * 3b Region third trans. -> Not present (nullification)
*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
unsigned long address;
+ int user_address;
unsigned long fixup;
- int write;
int si_code = SEGV_MAPERR;
- int kernel_address = 0;
tsk = current;
mm = tsk->mm;
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
- if ((error_code & 0xff) == 4 && !(S390_lowcore.trans_exc_code & 4)) {
+ if (error_code == 4 && !(S390_lowcore.trans_exc_code & 4)) {
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
address = 0;
- kernel_address = 1;
+ user_address = 0;
goto no_context;
}
* more specific the segment and page table portion of
* the address
*/
-
- address = S390_lowcore.trans_exc_code&-4096L;
-
+ address = S390_lowcore.trans_exc_code & -4096L;
+ user_address = check_user_space(regs, error_code);
/*
- * Check which address space the address belongs to
+ * Verify that the fault happened in user space, that
+ * we are not in an interrupt and that there is a
+ * user context.
*/
- switch (S390_lowcore.trans_exc_code & 3)
- {
- case 0: /* Primary Segment Table Descriptor */
- kernel_address = 1;
- goto no_context;
-
- case 1: /* STD determined via access register */
- if (S390_lowcore.exc_access_id == 0)
- {
- kernel_address = 1;
- goto no_context;
- }
- if (regs && S390_lowcore.exc_access_id < NUM_ACRS)
- {
- if (regs->acrs[S390_lowcore.exc_access_id] == 0)
- {
- kernel_address = 1;
- goto no_context;
- }
- if (regs->acrs[S390_lowcore.exc_access_id] == 1)
- {
- /* user space address */
- break;
- }
- }
- die("page fault via unknown access register", regs, error_code);
- do_exit(SIGKILL);
- break;
-
- case 2: /* Secondary Segment Table Descriptor */
- case 3: /* Home Segment Table Descriptor */
- /* user space address */
- break;
- }
-
- /*
- * Check whether we have a user MM in the first place.
- */
- if (in_interrupt() || !mm || !(regs->psw.mask & _PSW_IO_MASK_BIT))
+ if (user_address == 0 || in_interrupt() || !mm)
goto no_context;
/*
* task's user address space, so we can switch on the
* interrupts again and then search the VMAs
*/
-
__sti();
down_read(&mm->mmap_sem);
* we can handle it..
*/
good_area:
- write = 0;
si_code = SEGV_ACCERR;
+ if (error_code != 4) {
+ /* page not present, check vm flags */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ }
- switch (error_code & 0xFF) {
- case 0x04: /* write, present*/
- write = 1;
- break;
- case 0x10: /* not present*/
- case 0x11: /* not present*/
- case 0x3B: /* not present*/
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- break;
- default:
- printk("code should be 4, 10 or 11 (%lX) \n",error_code&0xFF);
- goto bad_area;
- }
-
- survive:
+survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- switch (handle_mm_fault(mm, vma, address, write)) {
+ switch (handle_mm_fault(mm, vma, address, error_code == 4)) {
case 1:
tsk->min_flt++;
break;
if (regs->psw.mask & PSW_PROBLEM_STATE) {
tsk->thread.prot_addr = address;
tsk->thread.trap_no = error_code;
-#ifndef CONFIG_SYSCTL
-#ifdef CONFIG_PROCESS_DEBUG
- printk("User process fault: interruption code 0x%lX\n",error_code);
- printk("failing address: %lX\n",address);
- show_regs(regs);
-#endif
-#else
- if (sysctl_userprocess_debug) {
- printk("User process fault: interruption code 0x%lX\n",
- error_code);
- printk("failing address: %lX\n", address);
- show_regs(regs);
- }
-#endif
-
- force_sigsegv(tsk, si_code, (void *)address);
+ force_sigsegv(regs, error_code, si_code, address);
return;
}
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
-
- if (kernel_address)
+ if (user_address == 0)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %016lx\n", address);
else
out_of_memory:
up_read(&mm->mmap_sem);
if (tsk->pid == 1) {
- tsk->policy |= SCHED_YIELD;
- schedule();
- down_read(&mm->mmap_sem);
+		yield();
+		down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", tsk->comm);
goto no_context;
}
-/*
- * Send SIGSEGV to task. This is an external routine
- * to keep the stack usage of do_page_fault small.
- */
-static void force_sigsegv(struct task_struct *tsk, int code, void *address)
+void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
{
- struct siginfo si;
- si.si_signo = SIGSEGV;
- si.si_code = code;
- si.si_addr = address;
- force_sig_info(SIGSEGV, &si, tsk);
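+	/*
+	 * The protection exception is suppressing, i.e. the PSW points
+	 * past the faulting instruction; step back by the instruction
+	 * length passed in the upper half of the interruption code.
+	 */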
+ regs->psw.addr -= (error_code >> 16);
+ do_exception(regs, 4);
}
+void do_segment_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ do_exception(regs, 0x10);
+}
+
+void do_page_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ do_exception(regs, 0x11);
+}
+
+void do_region_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ do_exception(regs, 0x3b);
+}
#ifdef CONFIG_PFAULT
/*
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
+#include <asm/tlbflush.h>
mmu_gather_t mmu_gathers[NR_CPUS];
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-int do_check_pgt_cache(int low, int high)
-{
- int freed = 0;
- if(pgtable_cache_size > high) {
- do {
- if(pgd_quicklist) {
- free_pgd_slow(get_pgd_fast());
- freed += 4;
- }
- if(pmd_quicklist) {
- pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
- freed += 4;
- }
- if(pte_quicklist) {
- pte_free_slow(pte_alloc_one_fast(NULL, 0));
- freed += 1;
- }
- } while(pgtable_cache_size > low);
- }
- return freed;
-}
-
void show_mem(void)
{
int i, total = 0,reserved = 0;
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
- printk("%ld pages in page table cache\n",pgtable_cache_size);
}
/* References to section boundaries */
}
pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- pmd_populate(&init_mm, pm_dir, pt_dir);
+ pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
- pte = mk_pte_phys(pfn, PAGE_KERNEL);
+ pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn) {
pte_clear(&pte);
continue;
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
if (address >= end)
BUG();
do {
- pte_t * pte = pte_alloc(&init_mm, pmd, address);
+ pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, flags);
*(.fixup)
*(.gnu.warning)
} = 0x0700
+
+ _etext = .; /* End of text section */
+
.rodata : { *(.rodata) }
.kstrtab : { *(.kstrtab) }
__ksymtab : { *(__ksymtab) }
__stop___ksymtab = .;
- __start___kallsyms = .; /* All kernel symbols */
- __kallsyms : { *(__kallsyms) }
- __stop___kallsyms = .;
-
. = ALIGN(1048576); /* VM shared segments are 1MB aligned */
- _etext = .; /* End of text section */
+ _eshared = .; /* End of shareable data */
.data : { /* Data */
*(.data)
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
- . = ALIGN(4096);
- __init_end = .;
-
+ . = ALIGN(256);
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
*(.initcall7.init)
}
__initcall_end = .;
+ . = ALIGN(256);
+ __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;
*(.fixup)
*(.gnu.warning)
} = 0x0700
+
+ _etext = .; /* End of text section */
+
.rodata : { *(.rodata) *(.rodata.*) }
.kstrtab : { *(.kstrtab) }
__ksymtab : { *(__ksymtab) }
__stop___ksymtab = .;
- __start___kallsyms = .; /* All kernel symbols */
- __kallsyms : { *(__kallsyms) }
- __stop___kallsyms = .;
-
- _etext = .; /* End of text section */
-
.data : { /* Data */
*(.data)
CONSTRUCTORS
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
- . = ALIGN(4096);
- __init_end = .;
-
+ . = ALIGN(256);
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
*(.initcall7.init)
}
__initcall_end = .;
+ . = ALIGN(256);
+ __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;