iSeries (AS/400).
There are no changes outside of include/asm-ppc64 and arch/ppc64
in this changeset.
--- /dev/null
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Changes for PPC by Gary Thomas
+# Rewritten by Cort Dougan and Paul Mackerras
+# Adjusted for PPC64 by Tom Gall
+#
+
+# Virtual address the kernel is linked at (base of the 64-bit kernel region).
+KERNELLOAD =0xc000000000000000
+
+LINKFLAGS = -T arch/ppc64/vmlinux.lds -Ttext $(KERNELLOAD) -Bstatic
+CFLAGS := $(CFLAGS) -fsigned-char -msoft-float -pipe \
+	-Wno-uninitialized -mminimal-toc -fno-builtin
+CPP = $(CC) -E $(CFLAGS)
+
+
+HEAD := arch/ppc64/kernel/head.o
+
+ARCH_SUBDIRS = arch/ppc64/kernel arch/ppc64/mm arch/ppc64/lib
+SUBDIRS := $(SUBDIRS) $(ARCH_SUBDIRS)
+ARCHIVES := arch/ppc64/kernel/kernel.o arch/ppc64/mm/mm.o arch/ppc64/lib/lib.o $(ARCHIVES)
+CORE_FILES := arch/ppc64/kernel/kernel.o arch/ppc64/mm/mm.o arch/ppc64/lib/lib.o $(CORE_FILES)
+
+ifdef CONFIG_XMON
+SUBDIRS += arch/ppc64/xmon
+CORE_FILES += arch/ppc64/xmon/x.o
+endif
+
+MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+
+# The two platform types build different boot image sets; the config
+# options are mutually exclusive, so only one assignment takes effect.
+ifdef CONFIG_PPC_PSERIES
+BOOT_TARGETS = zImage znetboot.initrd zImage.initrd
+endif
+
+ifdef CONFIG_PPC_ISERIES
+BOOT_TARGETS = vmlinux.sminitrd vmlinux.initrd vmlinux.sm
+endif
+
+$(BOOT_TARGETS): vmlinux
+	@$(MAKEBOOT) $@
+
+znetboot: vmlinux
+ifdef CONFIG_SMP
+	cp -f vmlinux /tftpboot/vmlinux.smp
+else
+	cp -f vmlinux /tftpboot/vmlinux
+endif
+	@$(MAKEBOOT) $@
+
+%_config: arch/ppc64/configs/%_defconfig
+	rm -f .config arch/ppc64/defconfig
+	cp -f arch/ppc64/configs/$(@:config=defconfig) arch/ppc64/defconfig
+
+# List the generated files explicitly: {a,b} brace expansion is a
+# bash/csh extension and is not expanded by the POSIX /bin/sh that make
+# uses to run recipes, so the old brace form could silently remove nothing.
+archclean:
+	rm -f arch/ppc64/kernel/ppc_defs.h arch/ppc64/kernel/mk_defs.s \
+		arch/ppc64/kernel/mk_defs_out.c arch/ppc64/kernel/mk_defs_tpl
+	@$(MAKEBOOT) clean
+
+archmrproper:
+
+archdep:
+	$(MAKEBOOT) fastdep
--- /dev/null
+# Makefile for making ELF bootable images for booting on CHRP
+# using Open Firmware.
+#
+# Geert Uytterhoeven September 1997
+#
+# Based on coffboot by Paul Mackerras
+# Simplified for ppc64 by Todd Inglett
+#
+# NOTE: this code is built for 32 bit in ELF32 format even though
+# it packages a 64 bit kernel. We do this to simplify the
+# bootloader and increase compatibility with OpenFirmware.
+#
+# To this end we need to define BOOTCC, etc, as the tools
+# needed to build the 32 bit image. These are normally HOSTCC,
+# but may be a third compiler if, for example, you are cross
+# compiling from an intel box. Once the 64bit ppc gcc is
+# stable it will probably simply be a compiler switch to
+# compile for 32bit mode.
+# To make it easier to setup a cross compiler,
+# CROSS32_COMPILE is setup as a prefix just like CROSS_COMPILE
+# in the toplevel makefile.
+
+CROSS32_COMPILE =
+#CROSS32_COMPILE = /usr/local/ppc/bin/powerpc-linux-
+
+# 32-bit toolchain used for the boot wrapper objects (see NOTE above).
+BOOTCC = $(CROSS32_COMPILE)gcc
+BOOTCFLAGS = $(HOSTCFLAGS) -I$(HPATH)
+BOOTLD = $(CROSS32_COMPILE)ld
+BOOTAS = $(CROSS32_COMPILE)as
+BOOTAFLAGS = -D__ASSEMBLY__ $(HOSTCFLAGS)
+
+# Suffix rules: compile wrapper C and asm with the 32-bit tools,
+# not the kernel's 64-bit toolchain.
+.c.o:
+	$(BOOTCC) $(BOOTCFLAGS) -c -o $*.o $<
+.S.o:
+	$(BOOTCC) $(BOOTAFLAGS) -traditional -c -o $*.o $<
+
+CFLAGS = $(CPPFLAGS) -O -fno-builtin -DSTDC_HEADERS
+LD_ARGS = -Ttext 0x00400000 -e _start
+
+OBJS = crt0.o start.o main.o zlib.o image.o imagesize.o
+#LIBS = $(TOPDIR)/lib/lib.a
+LIBS =
+
+ifeq ($(CONFIG_SMP),y)
+TFTPIMAGE=/tftpboot/zImage.chrp.smp
+else
+TFTPIMAGE=/tftpboot/zImage.chrp
+endif
+
+
+# iSeries boots a plain vmlinux with an embedded System.map; pSeries
+# builds an OF-bootable zImage wrapper.
+ifeq ($(CONFIG_PPC_ISERIES),y)
+all: vmlinux.sm
+else
+all: $(TOPDIR)/zImage
+endif
+
+
+znetboot: zImage
+	cp zImage $(TFTPIMAGE)
+
+
+ifeq ($(CONFIG_PPC_ISERIES),y)
+
+# Host-built helper tools (run on the build machine, so HOSTCC).
+addSystemMap: addSystemMap.c
+	$(HOSTCC) $(HOSTCFLAGS) -o addSystemMap addSystemMap.c
+
+vmlinux.sm: $(TOPDIR)/vmlinux addSystemMap
+	./addSystemMap $(TOPDIR)/System.map $(TOPDIR)/vmlinux vmlinux.sm
+
+
+addRamDisk: addRamDisk.c
+	$(HOSTCC) $(HOSTCFLAGS) -o addRamDisk addRamDisk.c
+
+vmlinux.initrd: $(TOPDIR)/vmlinux addRamDisk ramdisk.image.gz $(TOPDIR)/System.map
+	./addRamDisk ramdisk.image.gz $(TOPDIR)/System.map $(TOPDIR)/vmlinux vmlinux.initrd
+
+vmlinux.sminitrd: vmlinux.sm addRamDisk ramdisk.image.gz $(TOPDIR)/System.map
+	./addRamDisk ramdisk.image.gz $(TOPDIR)/System.map vmlinux.sm vmlinux.sminitrd
+
+endif
+
+
+znetboot.initrd: zImage.initrd
+	cp zImage.initrd $(TFTPIMAGE)
+
+floppy: zImage
+	mcopy zImage a:zImage
+
+piggyback: piggyback.c
+	$(HOSTCC) $(HOSTCFLAGS) -DKERNELBASE=$(KERNELBASE) -o piggyback piggyback.c
+
+addnote: addnote.c
+	$(HOSTCC) $(HOSTCFLAGS) -o addnote addnote.c
+
+# Wrap the compressed kernel / System.map / initrd as assembled objects
+# so they can be linked into the zImage.
+image.o: piggyback vmlinux.gz
+	./piggyback image < vmlinux.gz | $(BOOTAS) -o image.o
+
+sysmap.o: piggyback ../../../System.map
+	./piggyback sysmap < ../../../System.map | $(BOOTAS) -o sysmap.o
+
+initrd.o: ramdisk.image.gz piggyback
+	./piggyback initrd < ramdisk.image.gz | $(BOOTAS) -o initrd.o
+
+zImage: $(OBJS) no_initrd.o addnote
+	$(BOOTLD) $(LD_ARGS) -T zImage.lds -o $@ $(OBJS) no_initrd.o $(LIBS)
+	./addnote $@
+
+zImage.initrd: $(OBJS) initrd.o addnote
+	$(BOOTLD) $(LD_ARGS) -T zImage.lds -o $@ $(OBJS) initrd.o $(LIBS)
+	./addnote $@
+
+
+# Also (re)generates imagesize.c as a side effect; the bare rule below
+# records that dependency for imagesize.o.
+vmlinux.gz: $(TOPDIR)/vmlinux
+	$(OBJCOPY) -S -O binary $(TOPDIR)/vmlinux vmlinux
+	ls -l vmlinux | awk '{printf "/* generated -- do not edit! */\nint uncompressed_size = %d;\n", $$5}' > imagesize.c
+	$(CROSS_COMPILE)nm -n $(TOPDIR)/vmlinux | tail -1 | awk '{printf "long vmlinux_end = 0x%s;\n", substr($$1,8)}' >> imagesize.c
+	gzip -vf9 vmlinux
+
+imagesize.c: vmlinux.gz
+
+clean:
+	rm -f piggyback note addnote $(OBJS) zImage zImage.initrd vmlinux.gz no_initrd.o imagesize.c addSystemMap vmlinux.sm addRamDisk vmlinux.initrd vmlinux.sminitrd
+
+fastdep:
+	$(TOPDIR)/scripts/mkdep *.[Sch] > .depend
+
+dep:
+	$(CPP) $(CPPFLAGS) -M *.S *.c > .depend
+
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <netinet/in.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#define ElfHeaderSize (64 * 1024)
+#define ElfPages (ElfHeaderSize / 4096)
+#define KERNELBASE (0xc000000000000000)
+
+void get4k(FILE *file, char *buf )
+{
+	/* Read one 4096-byte page into buf, zero-filling the tail on a
+	 * short read (i.e. at end of file). */
+	size_t got = fread(buf, 1, 4096, file);
+	if (got < 4096)
+		memset(buf + got, 0, 4096 - got);
+}
+
+/* Write one full 4096-byte page from buf; short writes are not checked. */
+void put4k(FILE *file, char *buf )
+{
+	fwrite(buf, 1, 4096, file);
+}
+
+/* Print a fixed diagnostic, discard the partial output file and exit.
+ * fputs (not printf(msg)) so a '%' in the message can never be
+ * interpreted as a format specifier. */
+void death(const char *msg, FILE *fdesc, const char *fname)
+{
+	fputs(msg, stdout);
+	fclose(fdesc);
+	unlink(fname);
+	exit(1);
+}
+
+int main(int argc, char **argv)
+{
+	/*
+	 * Attach a gzipped ramdisk to an iSeries kernel image:
+	 *   argv[1] = ramdisk image (gzipped)
+	 *   argv[2] = System.map for that kernel
+	 *   argv[3] = input vmlinux
+	 *   argv[4] = output vmlinux
+	 * The ramdisk is appended at the first 4k boundary at/after _end
+	 * (taken from System.map) and its offset/size are patched into
+	 * the naca of the output image.
+	 */
+	char inbuf[4096];
+	FILE *ramDisk = NULL;
+	FILE *sysmap = NULL;
+	FILE *inputVmlinux = NULL;
+	FILE *outputVmlinux = NULL;
+
+	unsigned i = 0;
+	unsigned long ramFileLen = 0;
+	unsigned long ramLen = 0;
+	unsigned long roundR = 0;
+
+	unsigned long sysmapFileLen = 0;
+	unsigned long sysmapLen = 0;
+	unsigned long sysmapPages = 0;
+	char* ptr_end = NULL;
+	unsigned long offset_end = 0;
+
+	unsigned long kernelLen = 0;
+	unsigned long actualKernelLen = 0;
+	unsigned long round = 0;
+	unsigned long roundedKernelLen = 0;
+	unsigned long ramStartOffs = 0;
+	unsigned long ramPages = 0;
+	unsigned long roundedKernelPages = 0;
+	unsigned long hvReleaseData = 0;
+	u_int32_t hvField = 0;	/* 32-bit scratch for the on-disk field */
+	u_int32_t eyeCatcher = 0xc8a5d9c4;
+	unsigned long naca = 0;
+	unsigned long xRamDisk = 0;
+	unsigned long xRamDiskSize = 0;
+	long padPages = 0;
+
+
+	if (argc < 2) {
+		printf("Name of RAM disk file missing.\n");
+		exit(1);
+	}
+
+	if (argc < 3) {
+		printf("Name of System Map input file is missing.\n");
+		exit(1);
+	}
+
+	if (argc < 4) {
+		printf("Name of vmlinux file missing.\n");
+		exit(1);
+	}
+
+	if (argc < 5) {
+		printf("Name of vmlinux output file missing.\n");
+		exit(1);
+	}
+
+
+	ramDisk = fopen(argv[1], "r");
+	if ( ! ramDisk ) {
+		printf("RAM disk file \"%s\" failed to open.\n", argv[1]);
+		exit(1);
+	}
+
+	sysmap = fopen(argv[2], "r");
+	if ( ! sysmap ) {
+		printf("System Map file \"%s\" failed to open.\n", argv[2]);
+		exit(1);
+	}
+
+	inputVmlinux = fopen(argv[3], "r");
+	if ( ! inputVmlinux ) {
+		printf("vmlinux file \"%s\" failed to open.\n", argv[3]);
+		exit(1);
+	}
+
+	outputVmlinux = fopen(argv[4], "w+");
+	if ( ! outputVmlinux ) {
+		printf("output vmlinux file \"%s\" failed to open.\n", argv[4]);
+		exit(1);
+	}
+
+
+
+	/* Input Vmlinux file */
+	fseek(inputVmlinux, 0, SEEK_END);
+	kernelLen = ftell(inputVmlinux);
+	fseek(inputVmlinux, 0, SEEK_SET);
+	/* %ld, not %d: kernelLen is an unsigned long */
+	printf("kernel file size = %ld\n", kernelLen);
+	if ( kernelLen == 0 ) {
+		printf("You must have a linux kernel specified as argv[3]\n");
+		exit(1);
+	}
+
+	actualKernelLen = kernelLen - ElfHeaderSize;
+
+	printf("actual kernel length (minus ELF header) = %ld\n", actualKernelLen);
+
+	round = actualKernelLen % 4096;
+	roundedKernelLen = actualKernelLen;
+	if ( round )
+		roundedKernelLen += (4096 - round);
+	printf("Vmlinux length rounded up to a 4k multiple = %ld/0x%lx \n", roundedKernelLen, roundedKernelLen);
+	roundedKernelPages = roundedKernelLen / 4096;
+	printf("Vmlinux pages to copy = %ld/0x%lx \n", roundedKernelPages, roundedKernelPages);
+
+
+
+	/* Input System Map file */
+	/* (needs to be processed simply to determine if we need to add pad pages due to the static variables not being included in the vmlinux) */
+	fseek(sysmap, 0, SEEK_END);
+	sysmapFileLen = ftell(sysmap);
+	fseek(sysmap, 0, SEEK_SET);
+	printf("%s file size = %ld/0x%lx \n", argv[2], sysmapFileLen, sysmapFileLen);
+
+	sysmapLen = sysmapFileLen;
+
+	/* Round up to a 4k multiple.  Compute the remainder first: the old
+	 * "4096 - len % 4096" form was 4096 (never 0) for an already-aligned
+	 * file, growing it by a spurious page and making the _end search
+	 * below scan a zero-filled page past the real data. */
+	roundR = sysmapLen % 4096;
+	if (roundR) {
+		printf("Rounding System Map file up to a multiple of 4096, adding %ld/0x%lx \n", 4096 - roundR, 4096 - roundR);
+		sysmapLen += 4096 - roundR;
+	}
+	printf("Rounded System Map size is %ld/0x%lx \n", sysmapLen, sysmapLen);
+
+	/* Process the Sysmap file to determine where _end is */
+	sysmapPages = sysmapLen / 4096;
+	/* read through the file so inbuf holds its final page */
+	for (i=0; i<sysmapPages; ++i) {
+		get4k(sysmap, inbuf);
+	}
+	/* search for _end in the last page of the system map */
+	ptr_end = strstr(inbuf, " _end");
+	if (!ptr_end) {
+		printf("Unable to find _end in the sysmap file \n");
+		printf("inbuf: \n");
+		printf("%s \n", inbuf);
+		exit(1);
+	}
+	printf("Found _end in the last page of the sysmap - backing up 10 characters it looks like %s", ptr_end-10);
+	/* convert address of _end in system map to hex offset.
+	 * NOTE(review): assumes the usual System.map line layout so that
+	 * backing up 10 characters lands inside the address field, and the
+	 * (unsigned int) cast limits the offset to < 4GB — confirm. */
+	offset_end = (unsigned int)strtol(ptr_end-10, NULL, 16);
+	/* calc how many pages we need to insert between the vmlinux and the start of the ram disk */
+	padPages = offset_end/4096 - roundedKernelPages;
+
+	/* Check and see if the vmlinux is already larger than _end in System.map */
+	if (padPages < 0) {
+		/* vmlinux is larger than _end - adjust the offset to the start of the embedded ram disk */
+		offset_end = roundedKernelLen;
+		printf("vmlinux is larger than _end indicates it needs to be - offset_end = %lx \n", offset_end);
+		padPages = 0;
+		/* %ld: padPages is a signed long */
+		printf("will insert %ld pages between the vmlinux and the start of the ram disk \n", padPages);
+	}
+	else {
+		/* _end is larger than vmlinux - use the offset to _end that we calculated from the system map */
+		printf("vmlinux is smaller than _end indicates is needed - offset_end = %lx \n", offset_end);
+		printf("will insert %ld pages between the vmlinux and the start of the ram disk \n", padPages);
+	}
+
+
+
+	/* Input Ram Disk file */
+	// Set the offset that the ram disk will be started at.
+	ramStartOffs = offset_end; /* determined from the input vmlinux file and the system map */
+	printf("Ram Disk will start at offset = 0x%lx \n", ramStartOffs);
+
+	fseek(ramDisk, 0, SEEK_END);
+	ramFileLen = ftell(ramDisk);
+	fseek(ramDisk, 0, SEEK_SET);
+	printf("%s file size = %ld/0x%lx \n", argv[1], ramFileLen, ramFileLen);
+
+	ramLen = ramFileLen;
+
+	/* Same remainder-first rounding as for the System.map above. */
+	roundR = ramLen % 4096;
+	if ( roundR ) {
+		printf("Rounding RAM disk file up to a multiple of 4096, adding %ld/0x%lx \n", 4096 - roundR, 4096 - roundR);
+		ramLen += 4096 - roundR;
+	}
+
+	printf("Rounded RAM disk size is %ld/0x%lx \n", ramLen, ramLen);
+	ramPages = ramLen / 4096;
+	printf("RAM disk pages to copy = %ld/0x%lx\n", ramPages, ramPages);
+
+
+
+	// Copy 64K ELF header
+	for (i=0; i<(ElfPages); ++i) {
+		get4k( inputVmlinux, inbuf );
+		put4k( outputVmlinux, inbuf );
+	}
+
+	/* Copy the vmlinux (as full pages). */
+	fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
+	for ( i=0; i<roundedKernelPages; ++i ) {
+		get4k( inputVmlinux, inbuf );
+		put4k( outputVmlinux, inbuf );
+	}
+
+	/* Insert pad pages (if appropriate) that are needed between */
+	/* | the end of the vmlinux and the ram disk. */
+	for (i=0; i<padPages; ++i) {
+		memset(inbuf, 0, 4096);
+		put4k(outputVmlinux, inbuf);
+	}
+
+	/* Copy the ram disk (as full pages). */
+	for ( i=0; i<ramPages; ++i ) {
+		get4k( ramDisk, inbuf );
+		put4k( outputVmlinux, inbuf );
+	}
+
+	/* Close the input files */
+	fclose(ramDisk);
+	fclose(inputVmlinux);
+	/* And flush the written output file */
+	fflush(outputVmlinux);
+
+
+
+	/* Fixup the new vmlinux to contain the ram disk starting offset (xRamDisk) and the ram disk size (xRamDiskSize) */
+	/* fseek to the hvReleaseData pointer */
+	fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
+	/* The on-disk field is 32 bits wide: read it into a 32-bit temporary.
+	 * (fread of 4 bytes into an 8-byte unsigned long fills the wrong end
+	 * of the object on a 64-bit big-endian host.) */
+	if (fread(&hvField, 4, 1, outputVmlinux) != 1) {
+		death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[4]);
+	}
+	hvReleaseData = ntohl(hvField); /* Convert to native int */
+	printf("hvReleaseData is at %08lx\n", hvReleaseData);
+
+	/* fseek to the hvReleaseData */
+	fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
+	if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
+		death("Could not read hvReleaseData\n", outputVmlinux, argv[4]);
+	}
+	/* Check hvReleaseData sanity */
+	if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
+		death("hvReleaseData is invalid\n", outputVmlinux, argv[4]);
+	}
+	/* Get the naca pointer */
+	naca = ntohl(*((u_int32_t*) &inbuf[0x0C])) - KERNELBASE;
+	printf("Naca is at offset 0x%lx \n", naca);
+
+	/* fseek to the naca */
+	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
+	if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
+		death("Could not read naca\n", outputVmlinux, argv[4]);
+	}
+	xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
+	xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
+	/* Make sure a RAM disk isn't already present */
+	if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
+		death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[4]);
+	}
+	/* Fill in the values */
+	*((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
+	*((u_int32_t *) &inbuf[0x14]) = htonl(ramPages);
+
+	/* Write out the new naca */
+	fflush(outputVmlinux);
+	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
+	if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
+		death("Could not write naca\n", outputVmlinux, argv[4]);
+	}
+	printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08lx\n",
+	       ramPages, ramStartOffs);
+
+	/* Done */
+	fclose(outputVmlinux);
+	/* Set permission to executable */
+	chmod(argv[4], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
+
+	return 0;
+}
+
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <byteswap.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+
+/* Hex-encode len bytes from inb into trb (two upper-case digits per
+ * byte), NUL-terminating the output.  trb must hold 2*len+1 bytes.
+ * The nibbles are taken from an unsigned char and masked with 0xf:
+ * with plain (possibly signed) char, `c >> 4' sign-extends for bytes
+ * >= 0x80 and the range test then produces garbage digits. */
+void xlate( char * inb, char * trb, unsigned len )
+{
+	unsigned i;
+	for ( i=0; i<len; ++i )
+	{
+		unsigned char c = *inb++;
+		unsigned char c1 = (c >> 4) & 0xf;
+		unsigned char c2 = c & 0xf;
+		if ( c1 > 9 )
+			c1 = c1 + 'A' - 10;
+		else
+			c1 = c1 + '0';
+		if ( c2 > 9 )
+			c2 = c2 + 'A' - 10;
+		else
+			c2 = c2 + '0';
+		*trb++ = c1;
+		*trb++ = c2;
+	}
+	*trb = 0;
+}
+
+#define ElfHeaderSize (64 * 1024)
+#define ElfPages (ElfHeaderSize / 4096)
+
+/* Read one 4096-byte page into buf, zero-filling the tail on a short
+ * read (end of file). */
+void get4k( /*istream *inf*/FILE *file, char *buf )
+{
+	unsigned j;
+	unsigned num = fread(buf, 1, 4096, file);
+	for ( j=num; j<4096; ++j )
+		buf[j] = 0;
+}
+
+/* Write one full 4096-byte page from buf; short writes are not checked. */
+void put4k( /*ostream *outf*/FILE *file, char *buf )
+{
+	fwrite(buf, 1, 4096, file);
+}
+
+int main(int argc, char **argv)
+{
+	/*
+	 * Embed a System.map into an iSeries kernel image:
+	 *   argv[1] = System.map, argv[2] = input vmlinux,
+	 *   argv[3] = output vmlinux.
+	 * The map is appended at the first 4k boundary at/after _end and
+	 * its start/end offsets are patched into the first kernel page
+	 * (embedded_sysmap_start at 0x3c, embedded_sysmap_end at 0x44).
+	 */
+	char inbuf[4096];
+	FILE *sysmap = NULL;
+	char* ptr_end = NULL;
+	FILE *inputVmlinux = NULL;
+	FILE *outputVmlinux = NULL;
+	long i = 0;
+	unsigned long sysmapFileLen = 0;
+	unsigned long sysmapLen = 0;
+	unsigned long roundR = 0;
+	unsigned long kernelLen = 0;
+	unsigned long actualKernelLen = 0;
+	unsigned long round = 0;
+	unsigned long roundedKernelLen = 0;
+	unsigned long sysmapStartOffs = 0;
+	unsigned long sysmapPages = 0;
+	unsigned long roundedKernelPages = 0;
+	long padPages = 0;
+	if ( argc < 2 )
+	{
+		printf("Name of System Map file missing.\n");
+		exit(1);
+	}
+
+	if ( argc < 3 )
+	{
+		printf("Name of vmlinux file missing.\n");
+		exit(1);
+	}
+
+	if ( argc < 4 )
+	{
+		printf("Name of vmlinux output file missing.\n");
+		exit(1);
+	}
+
+	sysmap = fopen(argv[1], "r");
+	if ( ! sysmap )
+	{
+		printf("System Map file \"%s\" failed to open.\n", argv[1]);
+		exit(1);
+	}
+	inputVmlinux = fopen(argv[2], "r");
+	if ( ! inputVmlinux )
+	{
+		printf("vmlinux file \"%s\" failed to open.\n", argv[2]);
+		exit(1);
+	}
+	outputVmlinux = fopen(argv[3], "w");
+	if ( ! outputVmlinux )
+	{
+		printf("output vmlinux file \"%s\" failed to open.\n", argv[3]);
+		exit(1);
+	}
+
+
+
+	fseek(inputVmlinux, 0, SEEK_END);
+	kernelLen = ftell(inputVmlinux);
+	fseek(inputVmlinux, 0, SEEK_SET);
+	printf("kernel file size = %ld\n", kernelLen);
+	if ( kernelLen == 0 )
+	{
+		printf("You must have a linux kernel specified as argv[2]\n");
+		exit(1);
+	}
+
+
+	actualKernelLen = kernelLen - ElfHeaderSize;
+
+	/* "0x%lx", not the old garbled "%lxx" format */
+	printf("actual kernel length (minus ELF header) = %ld/0x%lx \n", actualKernelLen, actualKernelLen);
+
+	round = actualKernelLen % 4096;
+	roundedKernelLen = actualKernelLen;
+	if ( round )
+		roundedKernelLen += (4096 - round);
+
+	printf("Kernel length rounded up to a 4k multiple = %ld/0x%lx \n", roundedKernelLen, roundedKernelLen);
+	roundedKernelPages = roundedKernelLen / 4096;
+	printf("Kernel pages to copy = %ld/0x%lx\n", roundedKernelPages, roundedKernelPages);
+
+
+
+	/* Sysmap file */
+	fseek(sysmap, 0, SEEK_END);
+	sysmapFileLen = ftell(sysmap);
+	fseek(sysmap, 0, SEEK_SET);
+	printf("%s file size = %ld\n", argv[1], sysmapFileLen);
+
+	sysmapLen = sysmapFileLen;
+
+	/* Round up to a 4k multiple.  Compute the remainder first: the old
+	 * "4096 - len % 4096" form was 4096 (never 0) for an already-aligned
+	 * file, growing it by a spurious page and making the _end search
+	 * below scan a zero-filled page past the real data. */
+	roundR = sysmapLen % 4096;
+	if (roundR)
+	{
+		printf("Rounding System Map file up to a multiple of 4096, adding %ld\n", 4096 - roundR);
+		sysmapLen += 4096 - roundR;
+	}
+	printf("Rounded System Map size is %ld\n", sysmapLen);
+
+	/* Process the Sysmap file to determine the true end of the kernel */
+	sysmapPages = sysmapLen / 4096;
+	printf("System map pages to copy = %ld\n", sysmapPages);
+	/* read through the file so inbuf holds its final page */
+	for (i=0; i<sysmapPages; ++i)
+	{
+		get4k(sysmap, inbuf);
+	}
+	/* search for _end in the last page of the system map */
+	ptr_end = strstr(inbuf, " _end");
+	if (!ptr_end)
+	{
+		printf("Unable to find _end in the sysmap file \n");
+		printf("inbuf: \n");
+		printf("%s \n", inbuf);
+		exit(1);
+	}
+	printf("Found _end in the last page of the sysmap - backing up 10 characters it looks like %s", ptr_end-10);
+	/* NOTE(review): assumes the usual System.map line layout so that
+	 * backing up 10 characters lands inside the address field, and the
+	 * (unsigned int) cast limits the offset to < 4GB — confirm. */
+	sysmapStartOffs = (unsigned int)strtol(ptr_end-10, NULL, 16);
+	/* calc how many pages we need to insert between the vmlinux and the start of the sysmap */
+	padPages = sysmapStartOffs/4096 - roundedKernelPages;
+
+	/* Check and see if the vmlinux is larger than _end in System.map */
+	if (padPages < 0)
+	{	/* vmlinux is larger than _end - adjust the offset to start the embedded system map */
+		sysmapStartOffs = roundedKernelLen;
+		printf("vmlinux is larger than _end indicates it needs to be - sysmapStartOffs = %lx \n", sysmapStartOffs);
+		padPages = 0;
+		/* %ld: padPages is a signed long */
+		printf("will insert %ld pages between the vmlinux and the start of the sysmap \n", padPages);
+	}
+	else
+	{	/* _end is larger than vmlinux - use the sysmapStartOffs we calculated from the system map */
+		printf("vmlinux is smaller than _end indicates is needed - sysmapStartOffs = %lx \n", sysmapStartOffs);
+		printf("will insert %ld pages between the vmlinux and the start of the sysmap \n", padPages);
+	}
+
+
+
+
+	/* Copy 64K ELF header */
+	for (i=0; i<(ElfPages); ++i)
+	{
+		get4k( inputVmlinux, inbuf );
+		put4k( outputVmlinux, inbuf );
+	}
+
+
+	/* Copy the vmlinux (as full pages). */
+	fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
+	for ( i=0; i<roundedKernelPages; ++i )
+	{
+		get4k( inputVmlinux, inbuf );
+
+		/* Set the offsets (of the start and end) of the embedded sysmap so it is set in the vmlinux.sm */
+		if ( i == 0 )
+		{
+			/* These are 32-bit big-endian fields, so store through a
+			 * u_int32_t pointer: the old `unsigned long *' store wrote
+			 * 8 bytes on a 64-bit host and clobbered the next field.
+			 * NOTE(review): the BYTE_ORDER/__BIG_ENDIAN macro mix relies
+			 * on <byteswap.h> pulling in <endian.h> — confirm on the
+			 * build host. */
+			u_int32_t * p;
+			printf("Storing embedded_sysmap_start at 0x3c\n");
+			p = (u_int32_t *)(inbuf + 0x3c);
+
+#if (BYTE_ORDER == __BIG_ENDIAN)
+			*p = sysmapStartOffs;
+#else
+			*p = bswap_32(sysmapStartOffs);
+#endif
+
+			printf("Storing embedded_sysmap_end at 0x44\n");
+			p = (u_int32_t *)(inbuf + 0x44);
+
+#if (BYTE_ORDER == __BIG_ENDIAN)
+			*p = sysmapStartOffs + sysmapFileLen;
+#else
+			*p = bswap_32(sysmapStartOffs + sysmapFileLen);
+#endif
+		}
+
+		put4k( outputVmlinux, inbuf );
+	}
+
+
+	/* Insert any pad pages between the end of the vmlinux and where the system map needs to be. */
+	for (i=0; i<padPages; ++i)
+	{
+		memset(inbuf, 0, 4096);
+		put4k(outputVmlinux, inbuf);
+	}
+
+
+	/* Copy the system map (as full pages). */
+	fseek(sysmap, 0, SEEK_SET); /* start reading from begining of the system map */
+	for ( i=0; i<sysmapPages; ++i )
+	{
+		get4k( sysmap, inbuf );
+		put4k( outputVmlinux, inbuf );
+	}
+
+
+	fclose(sysmap);
+	fclose(inputVmlinux);
+	fclose(outputVmlinux);
+	/* Set permission to executable */
+	chmod(argv[3], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
+
+	return 0;
+}
+
--- /dev/null
+/*
+ * Program to hack in a PT_NOTE program header entry in an ELF file.
+ * This is needed for OF on RS/6000s to load an image correctly.
+ * Note that OF needs a program header entry for the note, not an
+ * ELF section.
+ *
+ * Copyright 2000 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Usage: addnote zImage
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+char arch[] = "PowerPC";
+
+#define N_DESCR 6
+unsigned int descr[N_DESCR] = {
+ 0xffffffff, /* real-mode = true */
+ 0x00c00000, /* real-base, i.e. where we expect OF to be */
+ 0xffffffff, /* real-size */
+ 0xffffffff, /* virt-base */
+ 0xffffffff, /* virt-size */
+ 0x4000, /* load-base */
+};
+
+unsigned char buf[512];
+
+#define GET_16BE(off) ((buf[off] << 8) + (buf[(off)+1]))
+#define GET_32BE(off) ((GET_16BE(off) << 16) + GET_16BE((off)+2))
+
+#define PUT_16BE(off, v) (buf[off] = ((v) >> 8) & 0xff, \
+ buf[(off) + 1] = (v) & 0xff)
+#define PUT_32BE(off, v) (PUT_16BE((off), (v) >> 16), \
+ PUT_16BE((off) + 2, (v)))
+
+/* Structure of an ELF file */
+#define E_IDENT 0 /* ELF header */
+#define E_PHOFF 28
+#define E_PHENTSIZE 42
+#define E_PHNUM 44
+#define E_HSIZE 52 /* size of ELF header */
+
+#define EI_MAGIC 0 /* offsets in E_IDENT area */
+#define EI_CLASS 4
+#define EI_DATA 5
+
+#define PH_TYPE 0 /* ELF program header */
+#define PH_OFFSET 4
+#define PH_FILESZ 16
+#define PH_HSIZE 32 /* size of program header */
+
+#define PT_NOTE 4 /* Program header type = note */
+
+#define ELFCLASS32 1
+#define ELFDATA2MSB 2
+
+unsigned char elf_magic[4] = { 0x7f, 'E', 'L', 'F' };
+
+int
+main(int ac, char **av)
+{
+	/* Insert a PT_NOTE program header plus note payload into the zImage
+	 * named by av[1] so Open Firmware loads it correctly (see the file
+	 * header comment).  Works in place; only the first 512 bytes of the
+	 * file are read and rewritten.
+	 * NOTE(review): exit() is used without <stdlib.h> being included —
+	 * confirm the host compiler tolerates the implicit declaration. */
+	int fd, n, i;
+	int ph, ps, np;
+	int nnote, ns;
+
+	if (ac != 2) {
+		fprintf(stderr, "Usage: %s elf-file\n", av[0]);
+		exit(1);
+	}
+	fd = open(av[1], O_RDWR);
+	if (fd < 0) {
+		perror(av[1]);
+		exit(1);
+	}
+
+	/* note size: name string + 3 header words + descriptor words */
+	nnote = strlen(arch) + 1 + (N_DESCR + 3) * 4;
+
+	n = read(fd, buf, sizeof(buf));
+	if (n < 0) {
+		perror("read");
+		exit(1);
+	}
+
+	if (n < E_HSIZE || memcmp(&buf[E_IDENT+EI_MAGIC], elf_magic, 4) != 0)
+		goto notelf;
+
+	if (buf[E_IDENT+EI_CLASS] != ELFCLASS32
+	    || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) {
+		fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n",
+			av[1]);
+		exit(1);
+	}
+
+	ph = GET_32BE(E_PHOFF);
+	ps = GET_16BE(E_PHENTSIZE);
+	np = GET_16BE(E_PHNUM);
+	if (ph < E_HSIZE || ps < PH_HSIZE || np < 1)
+		goto notelf;
+	if (ph + (np + 1) * ps + nnote > n)
+		goto nospace;
+
+	for (i = 0; i < np; ++i) {
+		if (GET_32BE(ph + PH_TYPE) == PT_NOTE) {
+			fprintf(stderr, "%s already has a note entry\n",
+				av[1]);
+			exit(0);
+		}
+		ph += ps;
+	}
+
+	/* XXX check that the area we want to use is all zeroes */
+	for (i = 0; i < ps + nnote; ++i)
+		if (buf[ph + i] != 0)
+			goto nospace;
+
+	/* fill in the program header entry */
+	ns = ph + ps;
+	PUT_32BE(ph + PH_TYPE, PT_NOTE);
+	PUT_32BE(ph + PH_OFFSET, ns);
+	PUT_32BE(ph + PH_FILESZ, nnote);
+
+	/* fill in the note area we point to */
+	/* XXX we should probably make this a proper section */
+	PUT_32BE(ns, strlen(arch) + 1);
+	PUT_32BE(ns + 4, N_DESCR * 4);
+	PUT_32BE(ns + 8, 0x1275);
+	strcpy((char *)&buf[ns + 12], arch);	/* cast: buf is unsigned char */
+	ns += 12 + strlen(arch) + 1;
+	for (i = 0; i < N_DESCR; ++i)
+		PUT_32BE(ns + i * 4, descr[i]);
+
+	/* Update the number of program headers */
+	PUT_16BE(E_PHNUM, np + 1);
+
+	/* write back */
+	lseek(fd, (long) 0, SEEK_SET);
+	i = write(fd, buf, n);
+	if (i < 0) {
+		perror("write");
+		exit(1);
+	}
+	if (i < n) {
+		fprintf(stderr, "%s: write truncated\n", av[1]);
+		exit(1);
+	}
+
+	exit(0);
+
+ notelf:
+	/* report the file name (av[1]); the old code printed av[0], the
+	 * program's own name, which made both diagnostics useless */
+	fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]);
+	exit(1);
+
+ nospace:
+	fprintf(stderr, "sorry, I can't find space in %s to put the note\n",
+		av[1]);
+	exit(1);
+}
--- /dev/null
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
+ */
+	.text
+	.globl _start
+_start:
+	## Flush the data cache and invalidate the instruction cache over
+	## the program text (_start.._etext) so the freshly loaded image is
+	## coherent before execution, 32 bytes (one cache line) at a time.
+	lis	9,_start@h
+	lis	8,_etext@ha
+	addi	8,8,_etext@l
+1:	dcbf	0,9
+	icbi	0,9
+	addi	9,9,0x20
+	## cmplw (register form): compare r9 against r8 (_etext).  The old
+	## cmplwi compared r9 against the literal immediate 8, so the flush
+	## loop terminated after a single cache line.
+	cmplw	0,9,8
+	blt	1b
+	sync
+	isync
+
+	## Clear out the BSS as per ANSI C requirements
+
+	lis	7,_end@ha
+	addi	7,7,_end@l	# r7 = &_end
+	lis	8,__bss_start@ha	#
+	addi	8,8,__bss_start@l	# r8 = &_bss_start
+
+	## Determine how large an area, in number of words, to clear
+
+	subf	7,8,7		# r7 = &_end - &_bss_start + 1
+	addi	7,7,3		# r7 += 3
+	srwi.	7,7,2		# r7 = size in words.
+	beq	3f		# If the size is zero, do not bother
+	addi	8,8,-4		# r8 -= 4
+	mtctr	7		# SPRN_CTR = number of words to clear
+	li	0,0		# r0 = 0
+2:	stwu	0,4(8)		# Clear out a word
+	bdnz	2b		# If we are not done yet, keep clearing
+3:
+
+
+	b start
+
+
+
+/*
+ * Flush the dcache and invalidate the icache for a range of addresses.
+ *
+ * flush_cache(addr, len)
+ */
+	.global flush_cache
+flush_cache:
+	# r3 = start address, r4 = length in bytes.  Flushes the dcache and
+	# invalidates the icache one 32-byte line at a time, then sync/isync
+	# so subsequent instruction fetches see the new data.
+	addi	4,4,0x1f	/* len = (len + 0x1f) / 0x20 */
+	rlwinm.	4,4,27,5,31
+	mtctr	4
+	beqlr			# zero lines: nothing to do
+1:	dcbf	0,3
+	icbi	0,3
+	addi	3,3,0x20
+	bdnz	1b
+	sync
+	isync
+	blr
+
+
+#define r0 0
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+
+	.globl strcpy
+strcpy:
+	# strcpy(dst=r3, src=r4): byte copy including the NUL; returns r3.
+	addi	r5,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r5)
+	bne	1b
+	blr
+
+	.globl strncpy
+strncpy:
+	# strncpy(dst=r3, src=r4, n=r5): copy at most n bytes, stopping at
+	# the NUL.  NOTE(review): unlike ANSI strncpy this does not zero-pad
+	# the remainder when src is shorter than n — confirm callers.
+	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r6,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r6)
+	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
+	blr
+
+	.globl strcat
+strcat:
+	# strcat(dst=r3, src=r4): find dst's NUL, then append src.
+	addi	r5,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r0,1(r5)
+	cmpwi	0,r0,0
+	bne	1b
+	addi	r5,r5,-1	# back up onto the NUL so it gets overwritten
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r5)
+	bne	1b
+	blr
+
+	.globl strcmp
+strcmp:
+	# strcmp(a=r3, b=r4): returns byte difference; cr1 tracks end of a.
+	addi	r5,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r3,1(r5)
+	cmpwi	1,r3,0
+	lbzu	r0,1(r4)
+	subf.	r3,r0,r3
+	beqlr	1		# hit NUL in a: r3 already holds the result
+	beq	1b		# bytes equal: keep scanning
+	blr
+
+	.globl strlen
+strlen:
+	# strlen(s=r3): scan to the NUL; length = NUL address - start.
+	addi	r4,r3,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	bne	1b
+	subf	r3,r3,r4
+	blr
+
+	.globl memset
+memset:
+	# memset(dst=r3, c=r4, n=r5): replicate the fill byte across the
+	# word, store words while >= 4 bytes remain (aligning first), then
+	# finish the tail byte-by-byte.  Returns r3 (unchanged).
+	rlwimi	r4,r4,8,16,23	# splat the byte into all 4 byte lanes
+	rlwimi	r4,r4,16,0,15
+	addi	r6,r3,-4
+	cmplwi	0,r5,4
+	blt	7f		# < 4 bytes: byte loop only
+	stwu	r4,4(r6)	# one (possibly unaligned) word up front
+	beqlr
+	andi.	r0,r6,3		# bytes needed to reach word alignment
+	add	r5,r0,r5
+	subf	r6,r0,r6
+	rlwinm	r0,r5,32-2,2,31	# r0 = remaining length in words
+	mtctr	r0
+	bdz	6f
+1:	stwu	r4,4(r6)
+	bdnz	1b
+6:	andi.	r5,r5,3		# leftover tail bytes
+7:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r6,r6,3
+8:	stbu	r4,1(r6)
+	bdnz	8b
+	blr
+
+	.globl bcopy
+bcopy:
+	# bcopy(src=r3, dst=r4, n=r5): BSD argument order — swap src/dst
+	# and tail-call memcpy.
+	mr	r6,r3
+	mr	r3,r4
+	mr	r4,r6
+	b	memcpy
+
+	.globl memmove
+memmove:
+	# memmove(dst=r3, src=r4, n=r5): copy backwards when dst > src so
+	# overlapping regions are handled; otherwise fall into memcpy.
+	cmplw	0,r3,r4
+	bgt	backwards_memcpy
+	/* fall through */
+
+	.globl memcpy
+memcpy:
+	# memcpy(dst=r3, src=r4, n=r5): unrolled 8-bytes-per-iteration word
+	# copy after aligning the destination, then a word and a byte tail.
+	rlwinm.	r7,r5,32-3,3,31	/* r0 = r5 >> 3 */
+	addi	r6,r3,-4
+	addi	r4,r4,-4
+	beq	2f		/* if less than 8 bytes to do */
+	andi.	r0,r6,3		/* get dest word aligned */
+	mtctr	r7
+	bne	5f
+1:	lwz	r7,4(r4)
+	lwzu	r8,8(r4)
+	stw	r7,4(r6)
+	stwu	r8,8(r6)
+	bdnz	1b
+	andi.	r5,r5,7
+2:	cmplwi	0,r5,4
+	blt	3f
+	lwzu	r0,4(r4)
+	addi	r5,r5,-4
+	stwu	r0,4(r6)
+3:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r4,r4,3
+	addi	r6,r6,3
+4:	lbzu	r0,1(r4)
+	stbu	r0,1(r6)
+	bdnz	4b
+	blr
+5:	subfic	r0,r0,4		# bytes to copy to reach alignment
+	mtctr	r0
+6:	lbz	r7,4(r4)
+	addi	r4,r4,1
+	stb	r7,4(r6)
+	addi	r6,r6,1
+	bdnz	6b
+	subf	r5,r0,r5
+	rlwinm.	r7,r5,32-3,3,31
+	beq	2b
+	mtctr	r7
+	b	1b
+
+	.globl backwards_memcpy
+backwards_memcpy:
+	# Copy n=r5 bytes from src=r4 to dst=r3 starting at the high end,
+	# for overlapping moves where dst > src.  Mirror of memcpy above.
+	rlwinm.	r7,r5,32-3,3,31	/* r0 = r5 >> 3 */
+	add	r6,r3,r5
+	add	r4,r4,r5
+	beq	2f
+	andi.	r0,r6,3
+	mtctr	r7
+	bne	5f
+1:	lwz	r7,-4(r4)
+	lwzu	r8,-8(r4)
+	stw	r7,-4(r6)
+	stwu	r8,-8(r6)
+	bdnz	1b
+	andi.	r5,r5,7
+2:	cmplwi	0,r5,4
+	blt	3f
+	lwzu	r0,-4(r4)
+	subi	r5,r5,4
+	stwu	r0,-4(r6)
+3:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+4:	lbzu	r0,-1(r4)
+	stbu	r0,-1(r6)
+	bdnz	4b
+	blr
+5:	mtctr	r0		# unaligned: peel r0 bytes first
+6:	lbzu	r7,-1(r4)
+	stbu	r7,-1(r6)
+	bdnz	6b
+	subf	r5,r0,r5
+	rlwinm.	r7,r5,32-3,3,31
+	beq	2b
+	mtctr	r7
+	b	1b
+
+	.globl memcmp
+memcmp:
+	# memcmp(a=r3, b=r4, n=r5): byte-wise compare; returns the first
+	# byte difference, or 0.  n <= 0 returns r3 unchanged (blelr).
+	cmpwi	0,r5,0
+	blelr
+	mtctr	r5
+	addi	r6,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r3,1(r6)
+	lbzu	r0,1(r4)
+	subf.	r3,r0,r3
+	bdnzt	2,1b		# loop while bytes equal and count remains
+	blr
--- /dev/null
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * Updates for PPC64 by Todd Inglett & Dave Engebretsen.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define __KERNEL__
+#include "zlib.h"
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/bootinfo.h>
+
+void memmove(void *dst, void *im, int len);
+
+extern void *finddevice(const char *);
+extern int getprop(void *, const char *, void *, int);
+extern void printf(const char *fmt, ...);
+extern int sprintf(char *buf, const char *fmt, ...);
+void gunzip(void *, int, unsigned char *, int *);
+void *claim(unsigned int, unsigned int, unsigned int);
+void flush_cache(void *, int);
+void pause(void);
+static struct bi_record *make_bi_recs(unsigned long);
+
+#define RAM_START 0x00000000
+#define RAM_END (64<<20)
+
+#define BOOT_START ((unsigned long)_start)
+#define BOOT_END ((unsigned long)_end)
+
+/* Value picked to match that used by yaboot */
+#define PROG_START 0x01400000
+
+char *avail_ram;
+char *begin_avail, *end_avail;
+char *avail_high;
+unsigned int heap_use;
+unsigned int heap_max;
+unsigned long initrd_start = 0;
+unsigned long initrd_size = 0;
+
+extern char _end[];
+extern char image_data[];
+extern int image_len;
+extern char initrd_data[];
+extern int initrd_len;
+extern char sysmap_data[];
+extern int sysmap_len;
+extern int uncompressed_size;
+extern long vmlinux_end;
+
+static char scratch[128<<10]; /* 128kB of scratch space for gunzip */
+
+typedef void (*kernel_entry_t)( unsigned long,
+ unsigned long,
+ void *,
+ struct bi_record *);
+
+void
+chrpboot(unsigned long a1, unsigned long a2, void *prom)
+{
+	/* Boot entry called from crt0: relocate the embedded initrd (if
+	 * any) to the top of RAM, claim memory from Open Firmware for the
+	 * kernel, gunzip (or copy) the kernel image, build the bootinfo
+	 * records past the end of the kernel, then jump to it.
+	 * a1/a2/prom are passed through from the firmware entry. */
+	unsigned len;
+	void *dst = (void *)-1;
+	unsigned long claim_addr;
+	unsigned char *im;
+	extern char _start;
+	struct bi_record *bi_recs;
+	kernel_entry_t kernel_entry;
+
+	printf("chrpboot starting: loaded at 0x%x\n\r", (unsigned)&_start);
+
+	if (initrd_len) {
+		/* Move the linked-in initrd to the top of the assumed RAM so
+		 * the uncompressed kernel cannot overwrite it.
+		 * NOTE(review): RAM_END is hard-coded to 64MB — confirm on
+		 * machines with less memory. */
+		initrd_size = initrd_len;
+		initrd_start = (RAM_END - initrd_size) & ~0xFFF;
+		a1 = a2 = 0;
+		claim(initrd_start, RAM_END - initrd_start, 0);
+		printf("initial ramdisk moving 0x%lx <- 0x%lx (%lx bytes)\n\r",
+			initrd_start, (unsigned long)initrd_data, initrd_size);
+		memcpy((void *)initrd_start, (void *)initrd_data, initrd_size);
+	}
+
+	im = image_data;
+	len = image_len;
+	uncompressed_size = PAGE_ALIGN(uncompressed_size);
+
+	/* Try successive 1MB-spaced addresses from PROG_START upward until
+	 * OF grants us a region big enough for the uncompressed kernel. */
+	for(claim_addr = PROG_START;
+	    claim_addr <= PROG_START * 8;
+	    claim_addr += 0x100000) {
+		printf("    trying: 0x%08lx\n\r", claim_addr);
+		dst = claim(claim_addr, uncompressed_size, 0);
+		if (dst != (void *)-1) break;
+	}
+	if (dst == (void *)-1) {
+		printf("claim error, can't allocate kernel memory\n\r");
+		return;
+	}
+
+	/* 0x1f 0x8b = gzip magic: decompress; otherwise copy as-is. */
+	if (im[0] == 0x1f && im[1] == 0x8b) {
+		avail_ram = scratch;
+		begin_avail = avail_high = avail_ram;
+		end_avail = scratch + sizeof(scratch);
+		printf("gunzipping (0x%x <- 0x%x:0x%0x)...",
+		       (unsigned)dst, (unsigned)im, (unsigned)im+len);
+		gunzip(dst, uncompressed_size, im, &len);
+		printf("done %u bytes\n\r", len);
+		printf("%u bytes of heap consumed, max in use %u\n\r",
+		       (unsigned)(avail_high - begin_avail), heap_max);
+	} else {
+		memmove(dst, im, len);
+	}
+
+	flush_cache(dst, len);
+
+	/* Bootinfo records go right after the kernel's _end. */
+	bi_recs = make_bi_recs((unsigned long)dst + vmlinux_end);
+
+	kernel_entry = (kernel_entry_t)dst;
+	printf( "kernel:\n\r"
+		"        entry addr = 0x%lx\n\r"
+		"        a1         = 0x%lx,\n\r"
+		"        a2         = 0x%lx,\n\r"
+		"        prom       = 0x%lx,\n\r"
+		"        bi_recs    = 0x%lx,\n\r",
+		(unsigned long)kernel_entry, a1, a2,
+		(unsigned long)prom, (unsigned long)bi_recs);
+
+	kernel_entry( a1, a2, prom, bi_recs );
+
+	/* The kernel should never return here. */
+	printf("returned?\n\r");
+
+	pause();
+}
+
+/*
+ * Build the bootinfo (bi_rec) list at 'addr', just past the kernel
+ * image.  Layout: BI_FIRST, BI_BOOTLOADER_ID, BI_MACHTYPE, optional
+ * BI_INITRD, BI_LAST.  The first record's data words are patched at
+ * the end to hold the list's end address and total size.
+ * Returns a pointer to the first record.
+ */
+static struct bi_record *
+make_bi_recs(unsigned long addr)
+{
+ struct bi_record *bi_recs;
+ struct bi_record *rec;
+
+ bi_recs = rec = bi_rec_init(addr);
+
+ rec = bi_rec_alloc(rec, 2);
+ rec->tag = BI_FIRST;
+ /* rec->data[0] = ...; # Written below before return */
+ /* rec->data[1] = ...; # Written below before return */
+
+ rec = bi_rec_alloc_bytes(rec, strlen("chrpboot")+1);
+ rec->tag = BI_BOOTLOADER_ID;
+ sprintf( (char *)rec->data, "chrpboot");
+
+ rec = bi_rec_alloc(rec, 2);
+ rec->tag = BI_MACHTYPE;
+ rec->data[0] = _MACH_pSeries;
+ rec->data[1] = 1;
+
+ if ( initrd_size > 0 ) {
+ rec = bi_rec_alloc(rec, 2);
+ rec->tag = BI_INITRD;
+ rec->data[0] = initrd_start;
+ rec->data[1] = initrd_size;
+ }
+
+#if 0
+ if ( sysmap_len > 0 ) {
+ rec = bi_rec_alloc(rec, 2);
+ rec->tag = BI_SYSMAP;
+ rec->data[0] = (unsigned long)sysmap_data;
+ rec->data[1] = sysmap_len;
+ }
+#endif
+
+ /* The terminating record points back at the head of the list. */
+ rec = bi_rec_alloc(rec, 1);
+ rec->tag = BI_LAST;
+ rec->data[0] = (bi_rec_field)bi_recs;
+
+ /* Save the _end_ address of the bi_rec's in the first bi_rec
+ * data field for easy access by the kernel.
+ */
+ bi_recs->data[0] = (bi_rec_field)rec;
+ bi_recs->data[1] = (bi_rec_field)rec + rec->size - (bi_rec_field)bi_recs;
+
+ return bi_recs;
+}
+
+struct memchunk {
+ unsigned int size;
+ unsigned int pad;
+ struct memchunk *next;
+};
+
+static struct memchunk *freechunks;
+
+/*
+ * zlib allocation callback: bump-allocate from the static scratch
+ * heap set up in chrpboot(), with a trivial size-matched free list
+ * for reuse.  x (the zlib "opaque" pointer) is unused.
+ */
+void *zalloc(void *x, unsigned items, unsigned size)
+{
+ void *p;
+ struct memchunk **mpp, *mp;
+
+ size *= items;
+ size = _ALIGN(size, sizeof(struct memchunk));
+ heap_use += size;
+ if (heap_use > heap_max)
+ heap_max = heap_use;
+ /* First try to recycle a previously freed chunk of the same size. */
+ for (mpp = &freechunks; (mp = *mpp) != 0; mpp = &mp->next) {
+ if (mp->size == size) {
+ *mpp = mp->next;
+ return mp;
+ }
+ }
+ p = avail_ram;
+ avail_ram += size;
+ if (avail_ram > avail_high)
+ avail_high = avail_ram;
+ /* NOTE(review): on heap exhaustion this only warns and pause()s;
+ * the out-of-bounds pointer is still returned if pause() resumes. */
+ if (avail_ram > end_avail) {
+ printf("oops... out of memory\n\r");
+ pause();
+ }
+ return p;
+}
+
+/*
+ * zlib free callback: if the chunk is the most recent allocation,
+ * roll the bump pointer back; otherwise push it onto the free list
+ * for size-matched reuse by zalloc().  x is unused.
+ */
+void zfree(void *x, void *addr, unsigned nb)
+{
+ struct memchunk *mp = addr;
+
+ nb = _ALIGN(nb, sizeof(struct memchunk));
+ heap_use -= nb;
+ if (avail_ram == addr + nb) {
+ avail_ram = addr;
+ return;
+ }
+ mp->size = nb;
+ mp->next = freechunks;
+ freechunks = mp;
+}
+
+#define HEAD_CRC 2
+#define EXTRA_FIELD 4
+#define ORIG_NAME 8
+#define COMMENT 0x10
+#define RESERVED 0xe0
+
+#define DEFLATED 8
+
+/*
+ * Decompress a gzip-wrapped deflate stream.
+ *   dst/dstlen: output buffer and its capacity
+ *   src:        gzip data (caller has already matched the 1f 8b magic)
+ *   *lenp:      in: bytes available at src; out: bytes written to dst
+ * Terminates via the OF "exit" service on any error.
+ */
+void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
+{
+ z_stream s;
+ int r, i, flags;
+
+ /* skip header */
+ i = 10;
+ flags = src[3];
+ if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
+ printf("bad gzipped data\n\r");
+ exit();
+ }
+ /* Optional gzip header fields, in order: extra, name, comment, CRC. */
+ if ((flags & EXTRA_FIELD) != 0)
+ i = 12 + src[10] + (src[11] << 8);
+ if ((flags & ORIG_NAME) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & COMMENT) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & HEAD_CRC) != 0)
+ i += 2;
+ if (i >= *lenp) {
+ printf("gunzip: ran out of data in header\n\r");
+ exit();
+ }
+
+ s.zalloc = zalloc;
+ s.zfree = zfree;
+ /* Negative windowBits: raw deflate, no zlib header or checksum. */
+ r = inflateInit2(&s, -MAX_WBITS);
+ if (r != Z_OK) {
+ printf("inflateInit2 returned %d\n\r", r);
+ exit();
+ }
+ s.next_in = src + i;
+ s.avail_in = *lenp - i;
+ s.next_out = dst;
+ s.avail_out = dstlen;
+ r = inflate(&s, Z_FINISH);
+ if (r != Z_OK && r != Z_STREAM_END) {
+ printf("inflate returned %d msg: %s\n\r", r, s.msg);
+ exit();
+ }
+ *lenp = s.next_out - (unsigned char *) dst;
+ inflateEnd(&s);
+}
+
--- /dev/null
+/*
+ * Copyright (C) Cort Dougan 1999.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Generate a note section as per the CHRP specification.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#define PL(x) printf("%c%c%c%c", ((x)>>24)&0xff, ((x)>>16)&0xff, ((x)>>8)&0xff, (x)&0xff );
+
+/*
+ * Emit the CHRP note section (ELF note named "PowerPC", type 0x1275)
+ * to stdout as raw big-endian 32-bit words via the PL() macro above.
+ * The descriptor tells Open Firmware how to load the image
+ * (real-mode = true, real-base 0xc00000, load-base 0x4000;
+ * 0xffffffff means "no preference").
+ */
+int main(void)
+{
+/* header */
+ /* namesz */
+ PL(strlen("PowerPC")+1);
+ /* descrsz */
+ PL(6*4);
+ /* type */
+ PL(0x1275);
+ /* name */
+ printf("PowerPC"); printf("%c", 0);
+
+/* descriptor */
+ /* real-mode */
+ PL(0xffffffff);
+ /* real-base */
+ PL(0x00c00000);
+ /* real-size */
+ PL(0xffffffff);
+ /* virt-base */
+ PL(0xffffffff);
+ /* virt-size */
+ PL(0xffffffff);
+ /* load-base */
+ PL(0x4000);
+ return 0;
+}
--- /dev/null
+/* Stubs linked in when no initial ramdisk is built into the image;
+ * chrpboot() skips the initrd relocation when initrd_len == 0. */
+char initrd_data[1];
+int initrd_len = 0;
--- /dev/null
+/*
+ * Copyright 2001 IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdio.h>
+#include <unistd.h>
+
+extern long ce_exec_config[];
+
+/*
+ * Read a binary image on stdin and write it to stdout as an assembler
+ * .data blob named <name>_data, followed by a <name>_len word, for
+ * linking into the boot wrapper.  A 32-bit XOR checksum of the emitted
+ * words is reported on stderr as a sanity check.
+ */
+int main(int argc, char *argv[])
+{
+ int i, cnt, pos, len;
+ unsigned int cksum, val;
+ unsigned char *lp;
+ unsigned char buf[8192];
+ if (argc != 2)
+ {
+ fprintf(stderr, "usage: %s name <in-file >out-file\n",
+ argv[0]);
+ return 1;
+ }
+ fprintf(stdout, "#\n");
+ fprintf(stdout, "# Miscellaneous data structures:\n");
+ fprintf(stdout, "# WARNING - this file is automatically generated!\n");
+ fprintf(stdout, "#\n");
+ fprintf(stdout, "\n");
+ fprintf(stdout, "\t.data\n");
+ fprintf(stdout, "\t.globl %s_data\n", argv[1]);
+ fprintf(stdout, "%s_data:\n", argv[1]);
+ pos = 0;
+ cksum = 0;
+ while ((len = read(0, buf, sizeof(buf))) > 0)
+ {
+ cnt = 0;
+ lp = (unsigned char *)buf;
+ /* Round up to longwords, zeroing the pad bytes so the emitted
+ * tail (and the checksum) is deterministic instead of leaking
+ * stale buffer contents.  sizeof(buf) is a multiple of 4, so
+ * the padding never writes past the buffer. */
+ for (i = len; (i & 3) != 0; i++)
+ buf[i] = 0;
+ len = (len + 3) & ~3;
+ for (i = 0; i < len; i += 4)
+ {
+ if (cnt == 0)
+ {
+ fprintf(stdout, "\t.long\t");
+ }
+ fprintf(stdout, "0x%02X%02X%02X%02X", lp[0], lp[1], lp[2], lp[3]);
+ /* Build the word from the four bytes just emitted (big
+ * endian) instead of dereferencing an unsigned long *,
+ * which read 8 bytes per step on LP64 hosts and could
+ * fault on alignment-strict ones. */
+ val = ((unsigned int)lp[0] << 24) | (lp[1] << 16)
+ | (lp[2] << 8) | lp[3];
+ cksum ^= val;
+ lp += 4;
+ if (++cnt == 4)
+ {
+ cnt = 0;
+ fprintf(stdout, " # %x \n", pos+i-12);
+ fflush(stdout);
+ } else
+ {
+ fprintf(stdout, ",");
+ }
+ }
+ if (cnt)
+ {
+ /* Terminate a partial line with a padding zero word. */
+ fprintf(stdout, "0\n");
+ }
+ pos += len;
+ }
+ fprintf(stdout, "\t.globl %s_len\n", argv[1]);
+ fprintf(stdout, "%s_len:\t.long\t0x%x\n", argv[1], pos);
+ fflush(stdout);
+ fclose(stdout);
+ fprintf(stderr, "cksum = %x\n", cksum);
+ /* return instead of exit(): <stdlib.h> was never included, so
+ * exit() was implicitly declared. */
+ return 0;
+}
+
--- /dev/null
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#include <asm/div64.h>
+
+/* Open Firmware client interface entry point, set by start(). */
+int (*prom)(void *);
+
+/* ihandles for the firmware console, fetched from /chosen in start(). */
+void *chosen_handle;
+void *stdin;
+void *stdout;
+void *stderr;
+
+void exit(void);
+void *finddevice(const char *name);
+int getprop(void *phandle, const char *name, void *buf, int buflen);
+/* Prototype fixed to match the definition in main.c, which takes
+ * unsigned long arguments (was declared with int, int). */
+void chrpboot(unsigned long a1, unsigned long a2, void *prom); /* in main.c */
+
+void printk(char *fmt, ...);
+
+/*
+ * C entry point from the wrapper's startup code.  Saves the Open
+ * Firmware client-interface entry point, looks up the /chosen
+ * stdin/stdout ihandles (each property must be exactly 4 bytes),
+ * then hands off to chrpboot().  Never returns: loops on the OF
+ * "exit" service if chrpboot comes back.
+ */
+void
+start(int a1, int a2, void *promptr)
+{
+ prom = (int (*)(void *)) promptr;
+ chosen_handle = finddevice("/chosen");
+ if (chosen_handle == (void *) -1)
+ exit();
+ if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
+ exit();
+ stderr = stdout;
+ if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
+ exit();
+
+ chrpboot(a1, a2, promptr);
+ for (;;)
+ exit();
+}
+
+/*
+ * OF client-interface "write": write nb bytes from ptr to the device
+ * ihandle 'handle'.  The prom_args struct layout mirrors the client
+ * interface calling convention (service, nargs, nret, then the cells).
+ * Returns the byte count actually written, or -1 if the call did not
+ * fill in the return cell.
+ */
+int
+write(void *handle, void *ptr, int nb)
+{
+ struct prom_args {
+ char *service;
+ int nargs;
+ int nret;
+ void *ihandle;
+ void *addr;
+ int len;
+ int actual;
+ } args;
+
+ args.service = "write";
+ args.nargs = 3;
+ args.nret = 1;
+ args.ihandle = handle;
+ args.addr = ptr;
+ args.len = nb;
+ args.actual = -1;
+ (*prom)(&args);
+ return args.actual;
+}
+
+/*
+ * OF client-interface "read": read up to nb bytes from ihandle
+ * 'handle' into ptr.  Returns the count actually read (0 = no data
+ * available yet), or -1 if the call did not fill in the return cell.
+ */
+int
+read(void *handle, void *ptr, int nb)
+{
+ struct prom_args {
+ char *service;
+ int nargs;
+ int nret;
+ void *ihandle;
+ void *addr;
+ int len;
+ int actual;
+ } args;
+
+ args.service = "read";
+ args.nargs = 3;
+ args.nret = 1;
+ args.ihandle = handle;
+ args.addr = ptr;
+ args.len = nb;
+ args.actual = -1;
+ (*prom)(&args);
+ return args.actual;
+}
+
+/*
+ * OF "exit" service: terminate the client program.  Retried in a
+ * loop since a conforming firmware should never return from it.
+ */
+void
+exit()
+{
+ struct prom_args {
+ char *service;
+ } args;
+
+ for (;;) {
+ args.service = "exit";
+ (*prom)(&args);
+ }
+}
+
+/*
+ * OF "enter" service: drop into the firmware's command interpreter.
+ * Returns if/when the operator resumes the client program.
+ */
+void
+pause(void)
+{
+ struct prom_args {
+ char *service;
+ } args;
+
+ args.service = "enter";
+ (*prom)(&args);
+}
+
+/*
+ * OF "finddevice": translate a device-tree path to a phandle.
+ * Returns (void *)-1 if the path does not exist (the return cell is
+ * preset to -1 in case the firmware ignores the call).
+ */
+void *
+finddevice(const char *name)
+{
+ struct prom_args {
+ char *service;
+ int nargs;
+ int nret;
+ const char *devspec;
+ void *phandle;
+ } args;
+
+ args.service = "finddevice";
+ args.nargs = 1;
+ args.nret = 1;
+ args.devspec = name;
+ args.phandle = (void *) -1;
+ (*prom)(&args);
+ return args.phandle;
+}
+
+/*
+ * OF "claim": allocate 'size' bytes of memory at address 'virt'
+ * (or aligned per 'align' when align != 0).  The arguments are
+ * marshalled through 32-bit cells as the client interface requires.
+ * Returns the base address, or (void *)-1 on failure.
+ */
+void *
+claim(unsigned long virt, unsigned long size, unsigned long align)
+{
+ struct prom_args {
+ char *service;
+ int nargs;
+ int nret;
+ unsigned int virt;
+ unsigned int size;
+ unsigned int align;
+ void *ret;
+ } args;
+
+ args.service = "claim";
+ args.nargs = 3;
+ args.nret = 1;
+ args.virt = virt;
+ args.size = size;
+ args.align = align;
+ /* Preset the return cell so a failed or ignored call reads as -1,
+ * consistent with the other wrappers (finddevice, getprop, ...);
+ * previously an unfilled cell returned stack garbage that the
+ * callers' (void *)-1 checks could not catch. */
+ args.ret = (void *) -1;
+ (*prom)(&args);
+ return args.ret;
+}
+
+/*
+ * OF "getprop": copy property 'name' of node 'phandle' into buf
+ * (at most buflen bytes).  Returns the actual property size, or -1
+ * if the property does not exist.
+ */
+int
+getprop(void *phandle, const char *name, void *buf, int buflen)
+{
+ struct prom_args {
+ char *service;
+ int nargs;
+ int nret;
+ void *phandle;
+ const char *name;
+ void *buf;
+ int buflen;
+ int size;
+ } args;
+
+ args.service = "getprop";
+ args.nargs = 4;
+ args.nret = 1;
+ args.phandle = phandle;
+ args.name = name;
+ args.buf = buf;
+ args.buflen = buflen;
+ args.size = -1;
+ (*prom)(&args);
+ return args.size;
+}
+
+/*
+ * Write one character to ihandle f, expanding '\n' to "\r\n" for the
+ * firmware console.  Returns the character, or -1 on write failure.
+ */
+int
+putc(int c, void *f)
+{
+ char ch = c;
+
+ if (c == '\n')
+ putc('\r', f);
+ return write(f, &ch, 1) == 1? c: -1;
+}
+
+/* putc() to the firmware stdout handle. */
+int
+putchar(int c)
+{
+ return putc(c, stdout);
+}
+
+/*
+ * Write a NUL-terminated string to ihandle f.
+ * Returns 0 on success, -1 on a short or failed write.
+ */
+int
+fputs(char *str, void *f)
+{
+ int n = strlen(str);
+
+ return write(f, str, n) == n? 0: -1;
+}
+
+/*
+ * Blocking read of one character from the firmware stdin.
+ * A read() of 0 (no data yet) is simply retried; -1 is reported
+ * on the console and returned to the caller.
+ */
+int
+readchar(void)
+{
+ char ch;
+
+ for (;;) {
+ switch (read(stdin, &ch, 1)) {
+ case 1:
+ return ch;
+ case -1:
+ printk("read(stdin) returned -1\r\n");
+ return -1;
+ }
+ }
+}
+
+static char line[256];
+static char *lineptr;
+static int lineleft;
+
+/*
+ * Line-buffered getchar with minimal line editing: backspace/DEL
+ * erase one character, ^U kills the whole line, ^D or a read error
+ * ends input.  Refills the static line[] buffer when it is empty;
+ * returns the next buffered character, or -1 at end of input.
+ */
+int
+getchar(void)
+{
+ int c;
+
+ if (lineleft == 0) {
+ lineptr = line;
+ for (;;) {
+ c = readchar();
+ /* -1 = read error, 4 = ^D (EOF) */
+ if (c == -1 || c == 4)
+ break;
+ if (c == '\r' || c == '\n') {
+ *lineptr++ = '\n';
+ putchar('\n');
+ break;
+ }
+ switch (c) {
+ case 0177:
+ case '\b':
+ /* Erase one character, echoing BS-space-BS. */
+ if (lineptr > line) {
+ putchar('\b');
+ putchar(' ');
+ putchar('\b');
+ --lineptr;
+ }
+ break;
+ case 'U' & 0x1F:
+ /* ^U: erase the whole line so far. */
+ while (lineptr > line) {
+ putchar('\b');
+ putchar(' ');
+ putchar('\b');
+ --lineptr;
+ }
+ break;
+ default:
+ /* Beep instead of overflowing the buffer. */
+ if (lineptr >= &line[sizeof(line) - 1])
+ putchar('\a');
+ else {
+ putchar(c);
+ *lineptr++ = c;
+ }
+ }
+ }
+ lineleft = lineptr - line;
+ lineptr = line;
+ }
+ if (lineleft == 0)
+ return -1;
+ --lineleft;
+ return *lineptr++;
+}
+
+
+
+/* String functions lifted from lib/vsprintf.c and lib/ctype.c */
+unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+
+size_t strnlen(const char * s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+ unsigned long result = 0,value;
+
+ if (!base) {
+ base = 10;
+ if (*cp == '0') {
+ base = 8;
+ cp++;
+ if ((*cp == 'x') && isxdigit(cp[1])) {
+ cp++;
+ base = 16;
+ }
+ }
+ }
+ while (isxdigit(*cp) &&
+ (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+ return result;
+}
+
+long simple_strtol(const char *cp,char **endp,unsigned int base)
+{
+ if(*cp=='-')
+ return -simple_strtoul(cp+1,endp,base);
+ return simple_strtoul(cp,endp,base);
+}
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (isdigit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * str, long long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[66];
+ const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(ZEROPAD+LEFT)))
+ while(size-->0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL) {
+ if (base==8)
+ *str++ = '0';
+ else if (base==16) {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+/* Forward decl. needed for IP address printing stuff... */
+int sprintf(char * buf, const char *fmt, ...);
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long long num;
+ int i, base;
+ char * str;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars for from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+
+ for (str=buf ; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= LEFT; goto repeat;
+ case '+': flags |= PLUS; goto repeat;
+ case ' ': flags |= SPACE; goto repeat;
+ case '#': flags |= SPECIAL; goto repeat;
+ case '0': flags |= ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg(args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ str = number(str,
+ (unsigned long) va_arg(args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else if (qualifier == 'Z') {
+ size_t * ip = va_arg(args, size_t *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ *str++ = '%';
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'L')
+ num = va_arg(args, long long);
+ else if (qualifier == 'l') {
+ num = va_arg(args, unsigned long);
+ if (flags & SIGN)
+ num = (signed long) num;
+ } else if (qualifier == 'Z') {
+ num = va_arg(args, size_t);
+ } else if (qualifier == 'h') {
+ num = (unsigned short) va_arg(args, int);
+ if (flags & SIGN)
+ num = (signed short) num;
+ } else {
+ num = va_arg(args, unsigned int);
+ if (flags & SIGN)
+ num = (signed int) num;
+ }
+ str = number(str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i=vsprintf(buf,fmt,args);
+ va_end(args);
+ return i;
+}
+
+static char sprint_buf[1024];
+
+void
+printk(char *fmt, ...)
+{
+ va_list args;
+ int n;
+
+ va_start(args, fmt);
+ n = vsprintf(sprint_buf, fmt, args);
+ va_end(args);
+ write(stdout, sprint_buf, n);
+}
+
+int
+printf(char *fmt, ...)
+{
+ va_list args;
+ int n;
+
+ va_start(args, fmt);
+ n = vsprintf(sprint_buf, fmt, args);
+ va_end(args);
+ write(stdout, sprint_buf, n);
+ return n;
+}
--- /dev/null
+OUTPUT_ARCH(powerpc)
+SEARCH_DIR(/lib); SEARCH_DIR(/usr/lib); SEARCH_DIR(/usr/local/lib); SEARCH_DIR(/usr/local/powerpc-any-elf/lib);
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .rel.text : { *(.rel.text) }
+ .rela.text : { *(.rela.text) }
+ .rel.data : { *(.rel.data) }
+ .rela.data : { *(.rela.data) }
+ .rel.rodata : { *(.rel.rodata) }
+ .rela.rodata : { *(.rela.rodata) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .plt : { *(.plt) }
+ .text :
+ {
+ *(.text)
+ *(.fixup)
+ *(.got1)
+ }
+ . = ALIGN(4096);
+ _etext = .;
+ PROVIDE (etext = .);
+ .rodata :
+ {
+ *(.rodata)
+ *(.rodata1)
+ }
+ .kstrtab : { *(.kstrtab) }
+ .fini : { *(.fini) } =0
+ .ctors : { *(.ctors) }
+ .dtors : { *(.dtors) }
+ /* Read-write section, merged into data segment: */
+ . = ALIGN(4096);
+ .data :
+ {
+ *(.data)
+ *(.data1)
+ *(.sdata)
+ *(.sdata2)
+ *(.got.plt) *(.got)
+ *(.dynamic)
+ CONSTRUCTORS
+ }
+ . = ALIGN(4096);
+ _edata = .;
+ PROVIDE (edata = .);
+
+ .fixup : { *(.fixup) }
+
+ . = ALIGN(4096);
+ __bss_start = .;
+ .bss :
+ {
+ *(.sbss) *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ }
+ . = ALIGN(4096);
+ _end = . ;
+ PROVIDE (end = .);
+}
--- /dev/null
+/*
+ * This file is derived from various .h and .c files from the zlib-0.95
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets. See zlib.h for conditions of
+ * distribution and use.
+ *
+ * Changes that have been made include:
+ * - changed functions not used outside this file to "local"
+ * - added minCompression parameter to deflateInit2
+ * - added Z_PACKET_FLUSH (see zlib.h for details)
+ * - added inflateIncomp
+ *
+ Copyright (C) 1995 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ gzip@prep.ai.mit.edu madler@alumni.caltech.edu
+
+ *
+ *
+ */
+
+/*+++++*/
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
+
+#define _Z_UTIL_H
+
+#include "zlib.h"
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+#define FAR
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern char *z_errmsg[]; /* indexed by 1-zlib_error */
+
+#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
+/* To be used only when the state is known to be valid */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+ /* common constants */
+
+#define DEFLATED 8
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+ /* functions */
+
+#include <linux/string.h>
+#define zmemcpy memcpy
+#define zmemzero(dest, len) memset(dest, 0, len)
+
+/* Diagnostic functions */
+#ifdef DEBUG_ZLIB
+# include <stdio.h>
+# ifndef verbose
+# define verbose 0
+# endif
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+
+typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
+
+/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
+/* void zcfree OF((voidpf opaque, voidpf ptr)); */
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr, size) \
+ (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
+#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
+
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/*+++++*/
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state FAR inflate_blocks_statef;
+
+local inflate_blocks_statef * inflate_blocks_new OF((
+ z_stream *z,
+ check_func c, /* check function */
+ uInt w)); /* window size */
+
+local int inflate_blocks OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int)); /* initial return code */
+
+local void inflate_blocks_reset OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ uLongf *)); /* check value on output */
+
+local int inflate_blocks_free OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ uLongf *)); /* check value on output */
+
+local int inflate_addhistory OF((
+ inflate_blocks_statef *,
+ z_stream *));
+
+local int inflate_packet_flush OF((
+ inflate_blocks_statef *));
+
+/*+++++*/
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+typedef struct inflate_huft_s FAR inflate_huft;
+
+struct inflate_huft_s {
+ union {
+ struct {
+ Byte Exop; /* number of extra bits or operation */
+ Byte Bits; /* number of bits in this code or subcode */
+ } what;
+ uInt Nalloc; /* number of these allocated here */
+ Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
+ } word; /* 16-bit, 8 bytes for 32-bit machines) */
+ union {
+ uInt Base; /* literal, length base, or distance base */
+ inflate_huft *Next; /* pointer to next level of table */
+ } more;
+};
+
+#ifdef DEBUG_ZLIB
+ local uInt inflate_hufts;
+#endif
+
+local int inflate_trees_bits OF((
+ uIntf *, /* 19 code lengths */
+ uIntf *, /* bits tree desired/actual depth */
+ inflate_huft * FAR *, /* bits tree result */
+ z_stream *)); /* for zalloc, zfree functions */
+
+local int inflate_trees_dynamic OF((
+ uInt, /* number of literal/length codes */
+ uInt, /* number of distance codes */
+ uIntf *, /* that many (total) code lengths */
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ z_stream *)); /* for zalloc, zfree functions */
+
+local int inflate_trees_fixed OF((
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *)); /* distance tree result */
+
+local int inflate_trees_free OF((
+ inflate_huft *, /* tables to free */
+ z_stream *)); /* for zfree function */
+
+
+/*+++++*/
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+local inflate_codes_statef *inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_stream *));
+
+local int inflate_codes OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int));
+
+local void inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_stream *));
+
+
+/*+++++*/
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* inflate private state */
+struct internal_state {
+
+ /* mode */
+ enum {
+ METHOD, /* waiting for method byte */
+ FLAG, /* waiting for flag byte */
+ BLOCKS, /* decompressing blocks */
+ CHECK4, /* four check bytes to go */
+ CHECK3, /* three check bytes to go */
+ CHECK2, /* two check bytes to go */
+ CHECK1, /* one check byte to go */
+ DONE, /* finished check, done */
+ BAD} /* got an error--stay here */
+ mode; /* current inflate mode */
+
+ /* mode dependent information */
+ union {
+ uInt method; /* if FLAGS, method byte */
+ struct {
+ uLong was; /* computed check value */
+ uLong need; /* stream check value */
+ } check; /* if CHECK, check values to compare */
+ uInt marker; /* if BAD, inflateSync's marker bytes count */
+ } sub; /* submode */
+
+ /* mode independent information */
+ int nowrap; /* flag for no wrapper */
+ uInt wbits; /* log2(window size) (8..15, defaults to 15) */
+ inflate_blocks_statef
+ *blocks; /* current inflate_blocks state */
+
+};
+
+
+int inflateReset(z)
+z_stream *z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->total_in = z->total_out = 0;
+ z->msg = Z_NULL;
+ z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+ inflate_blocks_reset(z->state->blocks, z, &c);
+ Trace((stderr, "inflate: reset\n"));
+ return Z_OK;
+}
+
+
+int inflateEnd(z)
+z_stream *z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->blocks != Z_NULL)
+ inflate_blocks_free(z->state->blocks, z, &c);
+ ZFREE(z, z->state, sizeof(struct internal_state));
+ z->state = Z_NULL;
+ Trace((stderr, "inflate: end\n"));
+ return Z_OK;
+}
+
+
+/* Initialize an inflate stream with a caller-chosen window size.
+ * w is log2(window size) in 8..15; a negative w selects the
+ * undocumented "nowrap" mode (no zlib header or check) with window
+ * size -w. Returns Z_OK, Z_MEM_ERROR, or Z_STREAM_ERROR. */
+int inflateInit2(z, w)
+z_stream *z;
+int w;
+{
+ if (z == Z_NULL)
+ return Z_STREAM_ERROR;
+ /* note: the caller must have installed zalloc/zfree already; the
+    default allocators (zcalloc/zcfree) are not linked in this build */
+
+ /* allocate the private state */
+ z->state = (struct internal_state FAR *)
+ ZALLOC(z,1,sizeof(struct internal_state));
+ if (z->state == Z_NULL)
+ return Z_MEM_ERROR;
+ z->state->blocks = Z_NULL;
+
+ /* handle undocumented nowrap option (no zlib header or check) */
+ if (w < 0)
+ {
+ w = - w;
+ z->state->nowrap = 1;
+ }
+ else
+ z->state->nowrap = 0;
+
+ /* only window sizes of 2^8..2^15 bytes are valid */
+ if (w < 8 || w > 15)
+ {
+ inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ z->state->wbits = (uInt)w;
+
+ /* create the blocks state; in nowrap mode there is no trailer to
+    verify, so no check function is passed down */
+ z->state->blocks =
+ inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w);
+ if (z->state->blocks == Z_NULL)
+ {
+ inflateEnd(z);
+ return Z_MEM_ERROR;
+ }
+ Trace((stderr, "inflate: allocated\n"));
+
+ /* reset state */
+ inflateReset(z);
+ return Z_OK;
+}
+
+
+/* Initialize an inflate stream with the default window size (DEF_WBITS). */
+int inflateInit(z)
+z_stream *z;
+{
+ return inflateInit2(z, DEF_WBITS);
+}
+
+
+/* NEEDBYTE: if no input remains, suspend inflate() by jumping to the
+   "empty" label with r as the pending return code; otherwise record
+   progress by setting r to Z_OK. Only usable inside inflate(). */
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
+/* NEXTBYTE: consume one input byte, keeping total_in in step. */
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
+
+/* Main decompression entry point: drives the stream-level state machine
+ * (zlib header -> deflate blocks -> 4-byte check value). f is the flush
+ * mode; Z_PACKET_FLUSH gets special PPP treatment. Returns Z_OK,
+ * Z_STREAM_END, Z_BUF_ERROR (no progress possible), Z_DATA_ERROR, or
+ * Z_STREAM_ERROR. Cases fall through as each piece is parsed. */
+int inflate(z, f)
+z_stream *z;
+int f;
+{
+ int r;
+ uInt b;
+
+ /* FIX: also reject a stream whose state is missing (never initialized
+    or already ended) -- previously only z and next_in were checked, so
+    z->state->mode below could dereference a null pointer */
+ if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL)
+ return Z_STREAM_ERROR;
+ r = Z_BUF_ERROR;
+ while (1) switch (z->state->mode)
+ {
+ case METHOD:
+ NEEDBYTE
+ if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
+ {
+ z->state->mode = BAD;
+ z->msg = "unknown compression method";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ /* high nibble of the method byte is log2(window size) - 8 */
+ if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+ {
+ z->state->mode = BAD;
+ z->msg = "invalid window size";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = FLAG;
+ /* fall through */
+ case FLAG:
+ NEEDBYTE
+ if ((b = NEXTBYTE) & 0x20)
+ {
+ z->state->mode = BAD;
+ z->msg = "invalid reserved bit";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ /* the two header bytes, as a big-endian value, must be divisible
+    by 31 */
+ if (((z->state->sub.method << 8) + b) % 31)
+ {
+ z->state->mode = BAD;
+ z->msg = "incorrect header check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib header ok\n"));
+ z->state->mode = BLOCKS;
+ /* fall through */
+ case BLOCKS:
+ r = inflate_blocks(z->state->blocks, z, r);
+ if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+ r = inflate_packet_flush(z->state->blocks);
+ if (r == Z_DATA_ERROR)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ break;
+ }
+ if (r != Z_STREAM_END)
+ return r;
+ r = Z_OK;
+ inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+ if (z->state->nowrap)
+ {
+ z->state->mode = DONE;
+ break;
+ }
+ z->state->mode = CHECK4;
+ /* fall through: read the 4-byte big-endian check value */
+ case CHECK4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = CHECK3;
+ case CHECK3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = CHECK2;
+ case CHECK2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = CHECK1;
+ case CHECK1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+
+ if (z->state->sub.check.was != z->state->sub.check.need)
+ {
+ z->state->mode = BAD;
+ z->msg = "incorrect data check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib check ok\n"));
+ z->state->mode = DONE;
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ /* reached from NEEDBYTE when input ran dry mid-header/mid-check */
+ empty:
+ if (f != Z_PACKET_FLUSH)
+ return r;
+ /* a packet must never end inside the header or check value */
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_DATA_ERROR;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+
+/* Insert the bytes at next_in/avail_in into the output history without
+ * producing any output (used for uncompressed PPP packets). The stream
+ * must be between blocks (see comment above). */
+int inflateIncomp(z)
+z_stream *z;
+{
+ /* FIX: guard against a missing stream or uninitialized state, as the
+    other entry points (inflateReset, inflateSync) already do */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != BLOCKS)
+ return Z_DATA_ERROR;
+ return inflate_addhistory(z->state->blocks, z);
+}
+
+
+/* Skip input until a full flush point -- the 00 00 FF FF byte sequence
+ * emitted by an empty stored block -- then restart inflation at the
+ * following block. Returns Z_OK if found, Z_BUF_ERROR if no input was
+ * available, Z_DATA_ERROR if the marker was not seen. */
+int inflateSync(z)
+z_stream *z;
+{
+ uInt n; /* number of bytes to look at */
+ Bytef *p; /* pointer to bytes */
+ uInt m; /* number of marker bytes found in a row */
+ uLong r, w; /* temporaries to save total_in and total_out */
+
+ /* set up */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != BAD)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0;
+ }
+ if ((n = z->avail_in) == 0)
+ return Z_BUF_ERROR;
+ p = z->next_in;
+ m = z->state->sub.marker;
+
+ /* search for 00 00 FF FF; m counts how much of the marker has been
+    matched so far and is carried across calls in sub.marker */
+ while (n && m < 4)
+ {
+ if (*p == (Byte)(m < 2 ? 0 : 0xff))
+ m++;
+ else if (*p)
+ m = 0;
+ else
+ m = 4 - m; /* a 00 where FF was expected can still be part of a
+                   fresh 00 00 prefix (m: 2 -> 2, 3 -> 1) */
+ p++, n--;
+ }
+
+ /* restore */
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ z->avail_in = n;
+ z->state->sub.marker = m;
+
+ /* return no joy or set up to restart on a new block */
+ if (m != 4)
+ return Z_DATA_ERROR;
+ /* inflateReset clears the totals; preserve them across the reset */
+ r = z->total_in; w = z->total_out;
+ inflateReset(z);
+ z->total_in = r; z->total_out = w;
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+
+/*+++++*/
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* inflate blocks semi-private state */
+/* Block-level decompressor state: tracks the current deflate block type,
+   the bit buffer, and the sliding output window (a circular buffer with
+   separate read and write pointers). */
+struct inflate_blocks_state {
+
+ /* mode */
+ enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ DONEB, /* finished last block, done */
+ BADB} /* got a data error--stuck here */
+ mode; /* current inflate_block mode */
+
+ /* mode dependent information -- which member is live depends on mode;
+    inflate_blocks_reset() frees the allocations of the live member */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ int nblens; /* # elements allocated at blens */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_huft *tl, *td; /* trees to free */
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* These macros operate on fixed local names in the enclosing function:
+   p/n = input pointer/count, b/k = bit buffer/bit count, q/m = window
+   write pointer/space, plus s (blocks state), z (stream), r (result). */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+/* LEAVE: write everything back and return through inflate_flush, which
+   copies window contents to the user's output buffer */
+#define LEAVE {UPDATE return inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+/* NEEDBITS: ensure at least j bits in b, suspending via LEAVE if the
+   input runs out */
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+/* WAVAIL: window bytes writable before hitting the read pointer or the
+   physical end of the window */
+#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=WAVAIL;}
+#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
+#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
+/* NEEDOUT: make at least one byte of window space, flushing if needed */
+#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
+
+/*
+ * The IBM 150 firmware munges the data right after _etext[]. This
+ * protects it. -- Cort
+ */
+/* NOTE(review): protect_mask appears to exist purely as padding and is
+   never read in this file -- confirm nothing else references it */
+local uInt protect_mask[] = {0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0};
+/* And'ing with mask[n] masks the lower n bits */
+local uInt inflate_mask[] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+/* copy as much as possible from the sliding window to the output area */
+local int inflate_flush OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int));
+
+/*+++++*/
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Fast decoding loop for literal/length (first two args: root-table bit
+   counts) and distance codes, used when enough input and window space is
+   guaranteed to avoid per-bit bookkeeping. */
+local int inflate_fast OF((
+ uInt,
+ uInt,
+ inflate_huft *,
+ inflate_huft *,
+ inflate_blocks_statef *,
+ z_stream *));
+
+
+/*+++++*/
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* Table for deflate from PKZIP's appnote.txt. */
+/* The 19 code-length code lengths of a dynamic block arrive in this
+   permuted order (most common codes first so trailing zeros can be
+   omitted); indexed by transmission position. */
+local uInt border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which is sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type is really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
+
+
+/* Return the current running check value in *c (when a check function is
+ * installed) and put the blocks state back to its initial TYPE mode,
+ * releasing any allocations held by the mode-dependent sub union. */
+local void inflate_blocks_reset(s, z, c)
+inflate_blocks_statef *s;
+z_stream *z;
+uLongf *c;
+{
+ if (s->checkfn != Z_NULL)
+ *c = s->check;
+ /* the sub union's live member depends on mode: only the tree-building
+    and code-decoding modes hold heap allocations */
+ if (s->mode == BTREE || s->mode == DTREE)
+ ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
+ if (s->mode == CODES)
+ {
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ }
+ s->mode = TYPE;
+ s->bitk = 0;
+ s->bitb = 0;
+ s->read = s->write = s->window;
+ /* re-seed the running check with its initial value */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(0L, Z_NULL, 0);
+ Trace((stderr, "inflate: blocks reset\n"));
+}
+
+
+/* Allocate a fresh blocks state with a w-byte sliding window and an
+ * optional check function c (Z_NULL to skip checksumming). Returns
+ * Z_NULL on allocation failure, with nothing leaked. */
+local inflate_blocks_statef *inflate_blocks_new(z, c, w)
+z_stream *z;
+check_func c;
+uInt w;
+{
+ inflate_blocks_statef *s;
+
+ s = (inflate_blocks_statef *)ZALLOC
+ (z,1,sizeof(struct inflate_blocks_state));
+ if (s == Z_NULL)
+ return Z_NULL;
+ /* the window is a separate allocation; undo the first on failure */
+ s->window = (Bytef *)ZALLOC(z, 1, w);
+ if (s->window == Z_NULL)
+ {
+ ZFREE(z, s, sizeof(struct inflate_blocks_state));
+ return Z_NULL;
+ }
+ s->end = s->window + w;
+ s->checkfn = c;
+ s->mode = TYPE;
+ Trace((stderr, "inflate: blocks allocated\n"));
+ inflate_blocks_reset(s, z, &s->check);
+ return s;
+}
+
+
+/* Decompress deflate blocks from the input into the sliding window.
+ * r carries the provisional return code in and out. Stream state is
+ * cached in locals (p/n input, b/k bit buffer, q/m window) via the
+ * LOAD/UPDATE macro family; LEAVE writes everything back and returns
+ * through inflate_flush(). The switch falls through from state to
+ * state as bits become available. */
+local int inflate_blocks(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt t; /* temporary storage */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input based on current state */
+ while (1) switch (s->mode)
+ {
+ case TYPE:
+ NEEDBITS(3)
+ t = (uInt)b & 7;
+ s->last = t & 1;
+ switch (t >> 1)
+ {
+ case 0: /* stored */
+ Trace((stderr, "inflate: stored block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ t = k & 7; /* go to byte boundary */
+ DUMPBITS(t)
+ s->mode = LENS; /* get length of stored block */
+ break;
+ case 1: /* fixed */
+ Trace((stderr, "inflate: fixed codes block%s\n",
+ s->last ? " (last)" : ""));
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+
+ inflate_trees_fixed(&bl, &bd, &tl, &td);
+ s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
+ if (s->sub.decode.codes == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.tl = Z_NULL; /* don't try to free these */
+ s->sub.decode.td = Z_NULL;
+ }
+ DUMPBITS(3)
+ s->mode = CODES;
+ break;
+ case 2: /* dynamic */
+ Trace((stderr, "inflate: dynamic codes block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ s->mode = TABLE;
+ break;
+ case 3: /* illegal */
+ DUMPBITS(3)
+ s->mode = BADB;
+ z->msg = "invalid block type";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ break;
+ case LENS:
+ NEEDBITS(32)
+ /* the 16-bit stored length is followed by its one's complement */
+ if (((~b) >> 16) != (b & 0xffff))
+ {
+ s->mode = BADB;
+ z->msg = "invalid stored block lengths";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ s->sub.left = (uInt)b & 0xffff;
+ b = k = 0; /* dump bits */
+ Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
+ s->mode = s->sub.left ? STORED : TYPE;
+ break;
+ case STORED:
+ if (n == 0)
+ LEAVE
+ NEEDOUT
+ /* copy min(bytes left, input available, window space) in one go */
+ t = s->sub.left;
+ if (t > n) t = n;
+ if (t > m) t = m;
+ zmemcpy(q, p, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((s->sub.left -= t) != 0)
+ break;
+ Tracev((stderr, "inflate: stored end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ s->mode = s->last ? DRY : TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14)
+ /* 5 bits literal count - 257, 5 bits distance count - 1,
+    4 bits code-length count - 4 */
+ s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ s->mode = BADB;
+ z->msg = "too many length or distance symbols";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+#endif
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (t < 19)
+ t = 19;
+ if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.trees.nblens = t;
+ DUMPBITS(14)
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: table sizes ok\n"));
+ s->mode = BTREE;
+ case BTREE:
+ /* read (4 + table>>10) 3-bit code-length code lengths in border[]
+    order; the untransmitted remainder defaults to zero */
+ while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+ {
+ NEEDBITS(3)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+ DUMPBITS(3)
+ }
+ while (s->sub.trees.index < 19)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+ s->sub.trees.bb = 7;
+ t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+ &s->sub.trees.tb, z);
+ if (t != Z_OK)
+ {
+ r = t;
+ if (r == Z_DATA_ERROR)
+ s->mode = BADB;
+ LEAVE
+ }
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: bits tree ok\n"));
+ s->mode = DTREE;
+ case DTREE:
+ while (t = s->sub.trees.table,
+ s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+ {
+ inflate_huft *h;
+ uInt i, j, c;
+
+ t = s->sub.trees.bb;
+ NEEDBITS(t)
+ h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
+ t = h->word.what.Bits;
+ c = h->more.Base;
+ if (c < 16)
+ {
+ DUMPBITS(t)
+ s->sub.trees.blens[s->sub.trees.index++] = c;
+ }
+ else /* c == 16..18 */
+ {
+ /* repeat codes: 16 repeats the previous length, 17 and 18
+    emit runs of zero lengths */
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+ NEEDBITS(t + i)
+ DUMPBITS(t)
+ j += (uInt)b & inflate_mask[i];
+ DUMPBITS(i)
+ i = s->sub.trees.index;
+ t = s->sub.trees.table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+ (c == 16 && i < 1))
+ {
+ s->mode = BADB;
+ z->msg = "invalid bit length repeat";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+ do {
+ s->sub.trees.blens[i++] = c;
+ } while (--j);
+ s->sub.trees.index = i;
+ }
+ }
+ inflate_trees_free(s->sub.trees.tb, z);
+ s->sub.trees.tb = Z_NULL;
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+ inflate_codes_statef *c;
+
+ bl = 9; /* must be <= 9 for lookahead assumptions */
+ bd = 6; /* must be <= 9 for lookahead assumptions */
+ t = s->sub.trees.table;
+ t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+ s->sub.trees.blens, &bl, &bd, &tl, &td, z);
+ if (t != Z_OK)
+ {
+ if (t == (uInt)Z_DATA_ERROR)
+ s->mode = BADB;
+ r = t;
+ LEAVE
+ }
+ Tracev((stderr, "inflate: trees ok\n"));
+ if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
+ {
+ inflate_trees_free(td, z);
+ inflate_trees_free(tl, z);
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
+ s->sub.decode.codes = c;
+ s->sub.decode.tl = tl;
+ s->sub.decode.td = td;
+ }
+ s->mode = CODES;
+ case CODES:
+ UPDATE
+ if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
+ return inflate_flush(s, z, r);
+ r = Z_OK;
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ LOAD
+ Tracev((stderr, "inflate: codes end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ if (!s->last)
+ {
+ s->mode = TYPE;
+ break;
+ }
+ if (k > 7) /* return unused byte, if any */
+ {
+ Assert(k < 16, "inflate_codes grabbed too many bytes")
+ k -= 8;
+ n++;
+ p--; /* can always return one */
+ }
+ s->mode = DRY;
+ case DRY:
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ s->mode = DONEB;
+ case DONEB:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADB:
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+/* Free a blocks state and its window; *c receives the final check value
+ * (via inflate_blocks_reset, which also releases any mode-dependent
+ * allocations before the window and state themselves are freed). */
+local int inflate_blocks_free(s, z, c)
+inflate_blocks_statef *s;
+z_stream *z;
+uLongf *c;
+{
+ inflate_blocks_reset(s, z, c);
+ ZFREE(z, s->window, s->end - s->window);
+ ZFREE(z, s, sizeof(struct inflate_blocks_state));
+ Trace((stderr, "inflate: blocks freed\n"));
+ return Z_OK;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+local int inflate_addhistory(s, z)
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ /* b and k must exist because LOAD/UPDATE reference them, but no bit
+    buffer manipulation happens here */
+ uLong b; /* bit buffer */ /* NOT USED HERE */
+ uInt k; /* bits in bit buffer */ /* NOT USED HERE */
+ uInt t; /* temporary storage */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* the window must be caught up and the state between blocks */
+ if (s->read != s->write)
+ return Z_STREAM_ERROR;
+ if (s->mode != TYPE)
+ return Z_DATA_ERROR;
+
+ /* we're ready to rock */
+ LOAD
+ /* while there is input ready, copy to output buffer, moving
+ * pointers as needed.
+ */
+ while (n) {
+ t = n; /* how many to do */
+ /* is there room until end of buffer? */
+ if (t > m) t = m;
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, t);
+ zmemcpy(q, p, t);
+ q += t;
+ p += t;
+ n -= t;
+ z->total_out += t;
+ s->read = q; /* drag read pointer forward */
+/* WRAP */ /* expand WRAP macro by hand to handle s->read */
+ if (q == s->end) {
+ s->read = q = s->window;
+ m = WAVAIL;
+ }
+ }
+ UPDATE
+ return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+local int inflate_packet_flush(s)
+ inflate_blocks_statef *s;
+{
+ /* a flushed PPP packet must end just after the stored-block type
+    bits, i.e. while the (omitted) length words are awaited */
+ if (s->mode == LENS)
+ {
+ s->mode = TYPE; /* resume at the next block's type bits */
+ return Z_OK;
+ }
+ return Z_DATA_ERROR;
+}
+
+
+/*+++++*/
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+/* Build a Huffman decoding table from a set of code lengths; shared by
+   the bit-length, literal/length, and distance tree builders below. */
+local int huft_build OF((
+ uIntf *, /* code lengths in bits */
+ uInt, /* number of codes */
+ uInt, /* number of "simple" codes */
+ uIntf *, /* list of base values for non-simple codes */
+ uIntf *, /* list of extra bits for non-simple codes */
+ inflate_huft * FAR*,/* result: starting table */
+ uIntf *, /* maximum lookup bits (returns actual) */
+ z_stream *)); /* for zalloc function */
+
+/* Allocator/deallocator pair used to carve the static fixed-tree
+   tables out of the fixed_mem[] arena below. */
+local voidpf falloc OF((
+ voidpf, /* opaque pointer (not used) */
+ uInt, /* number of items */
+ uInt)); /* size of item */
+
+local void ffree OF((
+ voidpf q, /* opaque pointer (not used) */
+ voidpf p, /* what to free (not used) */
+ uInt n)); /* number of bytes (not used) */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+/* cplens/cplext are indexed by (length code - 257); cpdist/cpdext are
+   indexed directly by distance code. The base value plus the decoded
+   extra bits gives the actual length/distance. */
+local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* actually lengths - 2; also see note #13 above about 258 */
+local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
+local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+local uInt cpdext[] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+ The results of this trade are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15 /* maximum bit length of any code */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+#ifdef DEBUG_ZLIB
+ uInt inflate_hufts; /* running count of huft entries allocated */
+#endif
+
+/* Build a (possibly multi-level, see the long comment above) Huffman
+ * decoding table from the code lengths in b[]. On Z_OK or Z_BUF_ERROR,
+ * *t points at the linked table list and *m holds the actual root-table
+ * bit count. Allocation goes through ZALLOC on zs, so the fixed trees
+ * can redirect it into static storage via falloc. */
+local int huft_build(b, n, s, d, e, t, m, zs)
+uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
+uInt n; /* number of codes (assumed <= N_MAX) */
+uInt s; /* number of simple-valued codes (0..s-1) */
+uIntf *d; /* list of base values for non-simple codes */
+uIntf *e; /* list of extra bits for non-simple codes */
+inflate_huft * FAR *t; /* result: starting table */
+uIntf *m; /* maximum lookup bits, returns actual */
+z_stream *zs; /* for zalloc function */
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ if the given code set is incomplete (the tables are still built in this
+ case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
+ over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+ uInt a; /* counter for codes of length k */
+ uInt c[BMAX+1]; /* bit length count table */
+ uInt f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register uInt i; /* counter, current code */
+ register uInt j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register uIntf *p; /* pointer into c[], b[], or v[] */
+ inflate_huft *q; /* points to current table */
+ struct inflate_huft_s r; /* table entry for structure assignment */
+ inflate_huft *u[BMAX]; /* table stack */
+ uInt v[N_MAX]; /* values in order of bit length */
+ register int w; /* bits before this table == (l * h) */
+ uInt x[BMAX+1]; /* bit offsets, then code stack */
+ uIntf *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ uInt z; /* number of entries in current table */
+
+
+ /* Generate counts for each bit length */
+ p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+ C4 /* clear c[]--assume BMAX+1 is 16 */
+ p = b; i = n;
+ do {
+ c[*p++]++; /* assume all entries <= BMAX */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+ return Z_OK;
+ }
+
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((uInt)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+
+ /* Adjust last length count to fill out codes, if needed */
+ /* (y > 0 afterwards means the code set is incomplete) */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return Z_DATA_ERROR;
+ if ((y -= c[i]) < 0)
+ return Z_DATA_ERROR;
+ c[i] += y;
+
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
+ q = (inflate_huft *)Z_NULL; /* ditto */
+ z = 0; /* ditto */
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a--)
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = (z = g - w) > (uInt)l ? l : z; /* table size upper limit */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ /* one extra entry is reserved for the list link used by
+    inflate_trees_free */
+ if ((q = (inflate_huft *)ZALLOC
+ (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
+ {
+ if (h)
+ inflate_trees_free(u[0], zs);
+ return Z_MEM_ERROR; /* not enough memory */
+ }
+ q->word.Nalloc = z + 1;
+#ifdef DEBUG_ZLIB
+ inflate_hufts += z + 1;
+#endif
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->next)) = Z_NULL;
+ u[h] = ++q; /* table starts after link */
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.bits = (Byte)l; /* bits to dump before this table */
+ r.exop = (Byte)j; /* bits in this table */
+ r.next = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+ }
+
+ /* set up table entry in r */
+ r.bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.exop = 128 + 64; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
+ r.base = *p++; /* simple code is just the value */
+ }
+ else
+ {
+ r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
+ r.base = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+ }
+ }
+
+
+ /* Return Z_BUF_ERROR if we were given an incomplete table */
+ return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
+
+
+/* Build the tree that decodes the code-length codes of a dynamic block.
+ * All 19 symbols are "simple", so no base/extra-bit tables are needed.
+ * An incomplete tree is unusable here and is reported as corrupt data. */
+local int inflate_trees_bits(c, bb, tb, z)
+uIntf *c; /* 19 code lengths */
+uIntf *bb; /* bits tree desired/actual depth */
+inflate_huft * FAR *tb; /* bits tree result */
+z_stream *z; /* for zfree function */
+{
+ int result;
+
+ result = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
+ if (result == Z_OK)
+ return Z_OK;
+ if (result == Z_DATA_ERROR)
+ z->msg = "oversubscribed dynamic bit lengths tree";
+ else if (result == Z_BUF_ERROR)
+ {
+ inflate_trees_free(*tb, z);
+ z->msg = "incomplete dynamic bit lengths tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+}
+
+
+/* Build the literal/length and distance decoding trees for a dynamic
+ * block from the combined code-length array c (nl literal/length
+ * lengths followed by nd distance lengths). Returns Z_OK, Z_DATA_ERROR
+ * (with z->msg set), or Z_MEM_ERROR. */
+local int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
+uInt nl; /* number of literal/length codes */
+uInt nd; /* number of distance codes */
+uIntf *c; /* that many (total) code lengths */
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+z_stream *z; /* for zfree function */
+{
+ int r;
+
+ /* build literal/length tree */
+ if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = "oversubscribed literal/length tree";
+ else if (r == Z_BUF_ERROR)
+ {
+ inflate_trees_free(*tl, z);
+ z->msg = "incomplete literal/length tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+ }
+
+ /* build distance tree */
+ /* FIX: the error messages below were copy-pasted from the
+    literal/length branch; they now name the distance tree, matching
+    upstream zlib */
+ if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = "oversubscribed distance tree";
+ else if (r == Z_BUF_ERROR) {
+#ifdef PKZIP_BUG_WORKAROUND
+ /* tolerate incomplete distance trees from old PKZIP streams */
+ /* NOTE(review): with this workaround enabled, the Z_DATA_ERROR
+    case above also falls through to the final return Z_OK without
+    freeing *tl -- confirm against upstream before changing */
+ r = Z_OK;
+ }
+#else
+ inflate_trees_free(*td, z);
+ z->msg = "incomplete distance tree";
+ r = Z_DATA_ERROR;
+ }
+ /* the literal/length tree is useless without a distance tree */
+ inflate_trees_free(*tl, z);
+ return r;
+#endif
+ }
+
+ /* done */
+ return Z_OK;
+}
+
+
+/* build fixed tables only once--keep them here */
+local int fixed_lock = 0;             /* crude busy-wait flag guarding the one-time build */
+local int fixed_built = 0;            /* set once the fixed tables exist */
+#define FIXEDH 530 /* number of hufts used by fixed tables */
+local uInt fixed_left = FIXEDH;       /* unused entries remaining in fixed_mem */
+local inflate_huft fixed_mem[FIXEDH]; /* static arena carved up by falloc() */
+local uInt fixed_bl;                  /* lookup bits for the fixed literal/length tree */
+local uInt fixed_bd;                  /* lookup bits for the fixed distance tree */
+local inflate_huft *fixed_tl;         /* fixed literal/length tree */
+local inflate_huft *fixed_td;         /* fixed distance tree */
+
+
+local voidpf falloc(q, n, s)
+voidpf q;               /* opaque pointer (not used) */
+uInt n;                 /* number of items */
+uInt s;                 /* size of item */
+/* Allocator handed to huft_build for the fixed trees: carves entries
+   out of the static fixed_mem[] arena from the top down.  Nothing is
+   ever returned to the arena. */
+{
+  uInt remaining;       /* entries left after this allocation */
+
+  Assert(s == sizeof(inflate_huft) && n <= fixed_left,
+         "inflate_trees falloc overflow");
+  if (q) s++;           /* to make some compilers happy */
+  remaining = fixed_left - n;
+  fixed_left = remaining;
+  return (voidpf)&fixed_mem[remaining];
+}
+
+
+local void ffree(q, p, n)
+voidpf q;
+voidpf p;
+uInt n;
+/* Companion to falloc: the fixed-table arena is static storage, so
+   nothing is ever freed and this must never actually be invoked. */
+{
+  Assert(0, "inflate_trees ffree called!");
+  if (q != Z_NULL)
+    q = p;              /* touch the arguments to hush compiler warnings */
+}
+
+
+local int inflate_trees_fixed(bl, bd, tl, td)
+uIntf *bl;              /* literal desired/actual bit depth */
+uIntf *bd;              /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+/* Return the Huffman tables for fixed-Huffman blocks, building them in
+   static storage on first use; every call thereafter just hands back
+   the cached pointers and lookup-bit counts. */
+{
+  /* build fixed tables if not built already--lock out other instances */
+  while (++fixed_lock > 1)
+    fixed_lock--;
+  if (!fixed_built)
+  {
+    int i;              /* index into the length list */
+    unsigned c[288];    /* length list for huft_build */
+    z_stream z;         /* fake stream carrying the arena allocator */
+
+    /* set up fake z_stream so huft_build allocates from fixed_mem[] */
+    z.zalloc = falloc;
+    z.zfree = ffree;
+    z.opaque = Z_NULL;
+
+    /* literal table: code lengths 8 (0..143), 9 (144..255),
+       7 (256..279), 8 (280..287) */
+    for (i = 0; i < 288; i++)
+      c[i] = i < 144 ? 8 : (i < 256 ? 9 : (i < 280 ? 7 : 8));
+    fixed_bl = 7;
+    huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
+
+    /* distance table: all 30 codes are 5 bits long */
+    for (i = 0; i < 30; i++)
+      c[i] = 5;
+    fixed_bd = 5;
+    huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
+
+    /* done */
+    fixed_built = 1;
+  }
+  fixed_lock--;
+  *bl = fixed_bl;
+  *bd = fixed_bd;
+  *tl = fixed_tl;
+  *td = fixed_td;
+  return Z_OK;
+}
+
+
+local int inflate_trees_free(t, z)
+inflate_huft *t;        /* table to free */
+z_stream *z;            /* for zfree function */
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+   list of the tables it made, with the links in a dummy first entry of
+   each table. */
+{
+  register inflate_huft *cur, *nxt;
+
+  /* walk the list; each table is freed from its dummy entry at t[-1] */
+  for (cur = t; cur != Z_NULL; cur = nxt)
+  {
+    --cur;                      /* back up to the malloc'ed base */
+    nxt = cur->next;            /* link lives in the dummy entry */
+    ZFREE(z, cur, cur->word.Nalloc * sizeof(inflate_huft));
+  }
+  return Z_OK;
+}
+
+/*+++++*/
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+/* NOTE(review): these shorthands rebind very common identifiers (base,
+   next, bits); the same macros are re-defined identically in the
+   inffast.c section further down. */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* inflate codes private state */
+/* Resumable decoder state for one block's literal/length-distance
+   stream; inflate_codes() picks up at `mode` whenever it is re-entered
+   with more input or output space. */
+struct inflate_codes_state {
+
+  /* mode */
+  enum {        /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+      START,    /* x: set up for LEN */
+      LEN,      /* i: get length/literal/eob next */
+      LENEXT,   /* i: getting length extra (have base) */
+      DIST,     /* i: get distance next */
+      DISTEXT,  /* i: getting distance extra */
+      COPY,     /* o: copying bytes in window, waiting for space */
+      LIT,      /* o: got literal, waiting for output space */
+      WASH,     /* o: got eob, possibly still output waiting */
+      END,      /* x: got eob and all data flushed */
+      BADCODE}  /* x: got error */
+    mode;               /* current inflate_codes mode */
+
+  /* mode dependent information */
+  uInt len;             /* match length being accumulated/copied */
+  union {
+    struct {
+      inflate_huft *tree;       /* pointer into tree */
+      uInt need;                /* bits needed */
+    } code;             /* if LEN or DIST, where in tree */
+    uInt lit;           /* if LIT, literal */
+    struct {
+      uInt get;                 /* bits to get for extra */
+      uInt dist;                /* distance back to copy from */
+    } copy;             /* if EXT or COPY, where and how much */
+  } sub;                /* submode */
+
+  /* mode independent information */
+  Byte lbits;           /* ltree bits decoded per branch */
+  Byte dbits;           /* dtree bits decoded per branch */
+  inflate_huft *ltree;  /* literal/length/eob tree */
+  inflate_huft *dtree;  /* distance tree */
+
+};
+
+
+local inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
+uInt bl, bd;            /* lookup bits for the two trees */
+inflate_huft *tl, *td;  /* literal/length and distance trees */
+z_stream *z;            /* for zalloc function */
+/* Allocate and initialize a codes state for decoding one block's
+   literal/length-distance stream.  Returns Z_NULL if the allocation
+   fails. */
+{
+  inflate_codes_statef *state;
+
+  state = (inflate_codes_statef *)
+          ZALLOC(z, 1, sizeof(struct inflate_codes_state));
+  if (state != Z_NULL)
+  {
+    state->mode = START;
+    state->lbits = (Byte)bl;
+    state->dbits = (Byte)bd;
+    state->ltree = tl;
+    state->dtree = td;
+    Tracev((stderr, "inflate: codes new\n"));
+  }
+  return state;
+}
+
+
+local int inflate_codes(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+/* Decode literal and length/distance codes for one deflate block until
+   input or output runs out, the block ends, or an error occurs.  r is
+   the status carried in from the caller and returned via LEAVE.  The
+   LOAD/UPDATE/LEAVE macros shuttle stream state between z, s and the
+   locals; c->mode records where to resume on the next call. */
+{
+  uInt j;               /* temporary storage */
+  inflate_huft *t;      /* temporary pointer */
+  uInt e;               /* extra bits or operation */
+  uLong b;              /* bit buffer */
+  uInt k;               /* bits in bit buffer */
+  Bytef *p;             /* input data pointer */
+  uInt n;               /* bytes available there */
+  Bytef *q;             /* output window write pointer */
+  uInt m;               /* bytes to end of window or read pointer */
+  Bytef *f;             /* pointer to copy strings from */
+  inflate_codes_statef *c = s->sub.decode.codes;  /* codes state */
+
+  /* copy input/output information to locals (UPDATE macro restores) */
+  LOAD
+
+  /* process input and output based on current state */
+  while (1) switch (c->mode)
+  {             /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+    case START:         /* x: set up for LEN */
+#ifndef SLOW
+      /* room for the longest match (258) and input for the longest
+         code sequence (10 bytes): take the unchecked fast path */
+      if (m >= 258 && n >= 10)
+      {
+        UPDATE
+        r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+        LOAD
+        if (r != Z_OK)
+        {
+          c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+          break;
+        }
+      }
+#endif /* !SLOW */
+      c->sub.code.need = c->lbits;
+      c->sub.code.tree = c->ltree;
+      c->mode = LEN;
+      /* fall through */
+    case LEN:           /* i: get length/literal/eob next */
+      j = c->sub.code.need;
+      NEEDBITS(j)
+      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+      DUMPBITS(t->bits)
+      e = (uInt)(t->exop);
+      if (e == 0)               /* literal */
+      {
+        c->sub.lit = t->base;
+        Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+                 "inflate: literal '%c'\n" :
+                 "inflate: literal 0x%02x\n", t->base));
+        c->mode = LIT;
+        break;
+      }
+      if (e & 16)               /* length */
+      {
+        c->sub.copy.get = e & 15;
+        c->len = t->base;
+        c->mode = LENEXT;
+        break;
+      }
+      if ((e & 64) == 0)        /* next table */
+      {
+        c->sub.code.need = e;
+        c->sub.code.tree = t->next;
+        break;
+      }
+      if (e & 32)               /* end of block */
+      {
+        Tracevv((stderr, "inflate: end of block\n"));
+        c->mode = WASH;
+        break;
+      }
+      c->mode = BADCODE;        /* invalid code */
+      z->msg = "invalid literal/length code";
+      r = Z_DATA_ERROR;
+      LEAVE
+    case LENEXT:        /* i: getting length extra (have base) */
+      j = c->sub.copy.get;
+      NEEDBITS(j)
+      c->len += (uInt)b & inflate_mask[j];
+      DUMPBITS(j)
+      c->sub.code.need = c->dbits;
+      c->sub.code.tree = c->dtree;
+      Tracevv((stderr, "inflate: length %u\n", c->len));
+      c->mode = DIST;
+      /* fall through */
+    case DIST:          /* i: get distance next */
+      j = c->sub.code.need;
+      NEEDBITS(j)
+      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+      DUMPBITS(t->bits)
+      e = (uInt)(t->exop);
+      if (e & 16)               /* distance */
+      {
+        c->sub.copy.get = e & 15;
+        c->sub.copy.dist = t->base;
+        c->mode = DISTEXT;
+        break;
+      }
+      if ((e & 64) == 0)        /* next table */
+      {
+        c->sub.code.need = e;
+        c->sub.code.tree = t->next;
+        break;
+      }
+      c->mode = BADCODE;        /* invalid code */
+      z->msg = "invalid distance code";
+      r = Z_DATA_ERROR;
+      LEAVE
+    case DISTEXT:       /* i: getting distance extra */
+      j = c->sub.copy.get;
+      NEEDBITS(j)
+      c->sub.copy.dist += (uInt)b & inflate_mask[j];
+      DUMPBITS(j)
+      Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
+      c->mode = COPY;
+      /* fall through */
+    case COPY:          /* o: copying bytes in window, waiting for space */
+      /* source is sub.copy.dist bytes back in the circular window,
+         wrapping past s->end back to s->window when necessary */
+#ifndef __TURBOC__ /* Turbo C bug for following expression */
+      f = (uInt)(q - s->window) < c->sub.copy.dist ?
+          s->end - (c->sub.copy.dist - (q - s->window)) :
+          q - c->sub.copy.dist;
+#else
+      f = q - c->sub.copy.dist;
+      if ((uInt)(q - s->window) < c->sub.copy.dist)
+        f = s->end - (c->sub.copy.dist - (q - s->window));
+#endif
+      while (c->len)
+      {
+        NEEDOUT
+        OUTBYTE(*f++)
+        if (f == s->end)
+          f = s->window;
+        c->len--;
+      }
+      c->mode = START;
+      break;
+    case LIT:           /* o: got literal, waiting for output space */
+      NEEDOUT
+      OUTBYTE(c->sub.lit)
+      c->mode = START;
+      break;
+    case WASH:          /* o: got eob, possibly more output */
+      FLUSH
+      if (s->read != s->write)
+        LEAVE
+      c->mode = END;
+      /* fall through */
+    case END:
+      r = Z_STREAM_END;
+      LEAVE
+    case BADCODE:       /* x: got error */
+      r = Z_DATA_ERROR;
+      LEAVE
+    default:
+      r = Z_STREAM_ERROR;
+      LEAVE
+  }
+}
+
+
+local void inflate_codes_free(c, z)
+inflate_codes_statef *c;
+z_stream *z;
+/* Release a codes state allocated by inflate_codes_new(). */
+{
+  Tracev((stderr, "inflate: codes free\n"));
+  ZFREE(z, c, sizeof(struct inflate_codes_state));
+}
+
+/*+++++*/
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* copy as much as possible from the sliding window to the output area */
+local int inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+/* Move decoded bytes from the circular window [s->read, s->write) to
+   z->next_out, in at most two passes (up to the window end, then from
+   the window start after wrapping).  Updates avail_out/total_out and
+   the running check, and upgrades Z_BUF_ERROR to Z_OK whenever any
+   progress is made. */
+{
+  uInt count;           /* bytes to copy in the current pass */
+  Bytef *put;           /* destination in the caller's output buffer */
+  Bytef *get;           /* source: window read pointer */
+
+  /* local copies of source and destination pointers */
+  put = z->next_out;
+  get = s->read;
+
+  /* compute number of bytes to copy as far as end of window */
+  count = (uInt)((get <= s->write ? s->write : s->end) - get);
+  if (count > z->avail_out)
+    count = z->avail_out;
+  if (count && r == Z_BUF_ERROR)
+    r = Z_OK;
+
+  /* update counters */
+  z->avail_out -= count;
+  z->total_out += count;
+
+  /* update check information */
+  if (s->checkfn != Z_NULL)
+    s->check = (*s->checkfn)(s->check, get, count);
+
+  /* copy as far as end of window */
+  zmemcpy(put, get, count);
+  put += count;
+  get += count;
+
+  /* see if more to copy at beginning of window */
+  if (get == s->end)
+  {
+    /* wrap pointers */
+    get = s->window;
+    if (s->write == s->end)
+      s->write = s->window;
+
+    /* compute bytes to copy */
+    count = (uInt)(s->write - get);
+    if (count > z->avail_out)
+      count = z->avail_out;
+    if (count && r == Z_BUF_ERROR)
+      r = Z_OK;
+
+    /* update counters */
+    z->avail_out -= count;
+    z->total_out += count;
+
+    /* update check information */
+    if (s->checkfn != Z_NULL)
+      s->check = (*s->checkfn)(s->check, get, count);
+
+    /* copy second stretch */
+    zmemcpy(put, get, count);
+    put += count;
+    get += count;
+  }
+
+  /* update pointers */
+  z->next_out = put;
+  s->read = get;
+
+  /* done */
+  return r;
+}
+
+
+/*+++++*/
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+/* NOTE(review): identical re-definitions of the macros in the
+   inftrees.c section above; benign (token-for-token the same). */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+/* GRABBITS tops up the bit buffer b to at least j bits; UNGRAB gives
+   whole unused bytes back to the input (n, p) keeping k < 8 */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
+
+/* Called with number of bytes left to write in window at least 258
+   (the maximum string length) and number of input bytes available
+   at least ten. The ten bytes are six bytes for the longest length/
+   distance pair plus four bytes for overloading the bit buffer. */
+
+local int inflate_fast(bl, bd, tl, td, s, z)
+uInt bl, bd;            /* lookup bits for the two trees */
+inflate_huft *tl, *td;  /* literal/length and distance trees */
+inflate_blocks_statef *s;
+z_stream *z;
+/* Unchecked fast decode loop: decodes codes and copies matches without
+   per-byte bounds tests, relying on the caller-guaranteed margins
+   above.  Returns Z_OK when the margins are exhausted, Z_STREAM_END on
+   end-of-block, or Z_DATA_ERROR with z->msg set. */
+{
+  inflate_huft *t;      /* temporary pointer */
+  uInt e;               /* extra bits or operation */
+  uLong b;              /* bit buffer */
+  uInt k;               /* bits in bit buffer */
+  Bytef *p;             /* input data pointer */
+  uInt n;               /* bytes available there */
+  Bytef *q;             /* output window write pointer */
+  uInt m;               /* bytes to end of window or read pointer */
+  uInt ml;              /* mask for literal/length tree */
+  uInt md;              /* mask for distance tree */
+  uInt c;               /* bytes to copy */
+  uInt d;               /* distance back to copy from */
+  Bytef *r;             /* copy source pointer */
+
+  /* load input, output, bit values */
+  LOAD
+
+  /* initialize masks */
+  ml = inflate_mask[bl];
+  md = inflate_mask[bd];
+
+  /* do until not enough input or output space for fast loop */
+  do {                          /* assume called with m >= 258 && n >= 10 */
+    /* get literal/length code */
+    GRABBITS(20)                /* max bits for literal/length code */
+    if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+    {
+      DUMPBITS(t->bits)
+      Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+               "inflate: * literal '%c'\n" :
+               "inflate: * literal 0x%02x\n", t->base));
+      *q++ = (Byte)t->base;
+      m--;
+      continue;
+    }
+    do {
+      DUMPBITS(t->bits)
+      if (e & 16)
+      {
+        /* get extra bits for length */
+        e &= 15;
+        c = t->base + ((uInt)b & inflate_mask[e]);
+        DUMPBITS(e)
+        Tracevv((stderr, "inflate: * length %u\n", c));
+
+        /* decode distance base of block to copy */
+        GRABBITS(15);           /* max bits for distance code */
+        e = (t = td + ((uInt)b & md))->exop;
+        do {
+          DUMPBITS(t->bits)
+          if (e & 16)
+          {
+            /* get extra bits to add to distance base */
+            e &= 15;
+            GRABBITS(e)         /* get extra bits (up to 13) */
+            d = t->base + ((uInt)b & inflate_mask[e]);
+            DUMPBITS(e)
+            Tracevv((stderr, "inflate: * distance %u\n", d));
+
+            /* do the copy */
+            m -= c;
+            if ((uInt)(q - s->window) >= d)     /* offset before dest */
+            {                                   /*  just copy */
+              r = q - d;
+              *q++ = *r++;  c--;        /* minimum count is three, */
+              *q++ = *r++;  c--;        /*  so unroll loop a little */
+            }
+            else                        /* else offset after destination */
+            {
+              e = d - (q - s->window);  /* bytes from offset to end */
+              r = s->end - e;           /* pointer to offset */
+              if (c > e)                /* if source crosses, */
+              {
+                c -= e;                 /* copy to end of window */
+                do {
+                  *q++ = *r++;
+                } while (--e);
+                r = s->window;          /* copy rest from start of window */
+              }
+            }
+            do {                        /* copy all or what's left */
+              *q++ = *r++;
+            } while (--c);
+            break;
+          }
+          else if ((e & 64) == 0)
+            /* second-level distance table lookup */
+            e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
+          else
+          {
+            z->msg = "invalid distance code";
+            UNGRAB
+            UPDATE
+            return Z_DATA_ERROR;
+          }
+        } while (1);
+        break;
+      }
+      if ((e & 64) == 0)
+      {
+        /* second-level literal/length table lookup */
+        if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
+        {
+          DUMPBITS(t->bits)
+          Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+                   "inflate: * literal '%c'\n" :
+                   "inflate: * literal 0x%02x\n", t->base));
+          *q++ = (Byte)t->base;
+          m--;
+          break;
+        }
+      }
+      else if (e & 32)
+      {
+        Tracevv((stderr, "inflate: * end of block\n"));
+        UNGRAB
+        UPDATE
+        return Z_STREAM_END;
+      }
+      else
+      {
+        z->msg = "invalid literal/length code";
+        UNGRAB
+        UPDATE
+        return Z_DATA_ERROR;
+      }
+    } while (1);
+  } while (m >= 258 && n >= 10);
+
+  /* not enough input or output--restore pointers and return */
+  UNGRAB
+  UPDATE
+  return Z_OK;
+}
+
+
+/*+++++*/
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
+
+char *zlib_version = ZLIB_VERSION;  /* runtime library version string */
+
+/* error message table; NOTE(review): the ordering (codes 1 down to -5,
+   per the trailing comments) suggests lookup by index (1 - err) --
+   confirm against the table's users, which are not visible here. */
+char *z_errmsg[] = {
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+""};
+
+
+/*+++++*/
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+
+/* loop-unrolling helpers: DO1 folds one byte into the running sums
+   s1/s2 (taken from the enclosing scope); DO16 handles sixteen bytes */
+#define DO1(buf) {s1 += *buf++; s2 += s1;}
+#define DO2(buf) DO1(buf); DO1(buf);
+#define DO4(buf) DO2(buf); DO2(buf);
+#define DO8(buf) DO4(buf); DO4(buf);
+#define DO16(buf) DO8(buf); DO8(buf);
+
+/* ========================================================================= */
+uLong adler32(adler, buf, len)
+ uLong adler;
+ Bytef *buf;
+ uInt len;
+/* Fold the bytes buf[0..len-1] into a running Adler-32 checksum and
+   return the updated value; with buf == Z_NULL, return the required
+   initial value (1). */
+{
+  unsigned long s1 = adler & 0xffff;          /* low word: sum of bytes */
+  unsigned long s2 = (adler >> 16) & 0xffff;  /* high word: sum of s1 values */
+  int count;
+
+  if (buf == Z_NULL) return 1L;
+
+  while (len > 0) {
+    /* process at most NMAX bytes before the sums can overflow 32 bits */
+    count = len < NMAX ? len : NMAX;
+    len -= count;
+    for (; count >= 16; count -= 16)
+      DO16(buf);
+    while (count > 0) {
+      DO1(buf);
+      count--;
+    }
+    s1 %= BASE;
+    s2 %= BASE;
+  }
+  return (s2 << 16) | s1;
+}
--- /dev/null
+/* */
+
+/*
+ * This file is derived from zlib.h and zconf.h from the zlib-0.95
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets.
+ */
+
+/*
+ * ==FILEVERSION 960122==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 0.95, Aug 16th, 1995.
+
+ Copyright (C) 1995 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ gzip@prep.ai.mit.edu madler@alumni.caltech.edu
+ */
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+/* #include "zconf.h" */ /* included directly here */
+
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
+
+/*
+ The library does not install any signal handler. It is recommended to
+ add at least a handler for SIGSEGV when decompressing; the library checks
+ the consistency of the input data whenever possible but may go nuts
+ for some forms of corrupted input.
+ */
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
+ * at addresses which are not a multiple of their size.
+ * Under DOS, -DFAR=far or -DFAR=__far may be needed.
+ */
+
+#ifndef STDC
+# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
+# define STDC
+# endif
+#endif
+
+#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
+# include <unix.h>
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ 1 << (windowBits+2) + 1 << (memLevel+9)
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+/* FAR qualifies pointers for segmented (DOS) memory models; it expands
+   to nothing on flat architectures (see the #define earlier in this
+   header) */
+typedef Byte FAR Bytef;
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ /* pre-ANSI compilers lack void*, so fall back to Byte pointers */
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+/* end of original zconf.h */
+
+#define ZLIB_VERSION "0.95P"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms may be added later and will have the same
+ stream interface.
+
+ For compression the application must provide the output buffer and
+ may optionally provide the input buffer for optimization. For decompression,
+ the application must provide the input buffer and may optionally provide
+ the output buffer for optimization.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
+
+struct internal_state;  /* opaque; defined by the implementation only */
+
+typedef struct z_stream_s {
+    Bytef *next_in; /* next input byte */
+    uInt avail_in; /* number of bytes available at next_in */
+    uLong total_in; /* total nb of input bytes read so far */
+
+    Bytef *next_out; /* next output byte should be put there */
+    uInt avail_out; /* remaining free space at next_out */
+    uLong total_out; /* total nb of bytes output so far */
+
+    char *msg; /* last error message, NULL if no error */
+    struct internal_state FAR *state; /* not visible by applications */
+
+    alloc_func zalloc; /* used to allocate the internal state */
+    free_func zfree; /* used to free the internal state */
+    voidp opaque; /* private data object passed to zalloc and zfree */
+
+    Byte data_type; /* best guess about the data type: ascii or binary */
+
+} z_stream;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_FULL_FLUSH 2
+#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
+#define Z_FINISH 4
+#define Z_PACKET_FLUSH 5
+/* See deflate() below for the usage of these constants */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+/* error codes for the compression/decompression functions */
+
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Used to set the data_type field */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+extern char *zlib_version;
+/* The application can compare zlib_version and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ */
+
+ /* basic functions */
+
+extern int inflateInit OF((z_stream *strm));
+/*
+ Initializes the internal stream state for decompression. The fields
+ zalloc and zfree must be initialized before by the caller. If zalloc and
+ zfree are set to Z_NULL, inflateInit updates them to use default allocation
+ functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory. msg is set to null if there is no error message.
+ inflateInit does not perform any decompression: this will be done by
+ inflate().
+*/
+
+
+extern int inflate OF((z_stream *strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() always provides as much output as possible
+ (until there is no more input data or no more space in the output buffer).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate().
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
+ inflate flushes as much output as possible to the output buffer. The
+ flushing behavior of inflate is not specified for values of the flush
+ parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
+ current implementation actually flushes as much output as possible
+ anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
+ has been consumed, it is expecting to see the length field of a stored
+ block; if not, it returns Z_DATA_ERROR.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ inflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if the end of the
+ compressed data has been reached and all uncompressed output has been
+ produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
+ the stream structure was inconsistent (for example if next_in or next_out
+ was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
+ progress is possible or if there was not enough room in the output buffer
+ when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
+ call inflateSync to look for a good compression block. */
+
+
+extern int inflateEnd OF((z_stream *strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* advanced functions */
+
+extern int inflateInit2 OF((z_stream *strm,
+ int windowBits));
+/*
+ This is another version of inflateInit with more compression options. The
+ fields next_out, zalloc and zfree must be initialized before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library (the value 16 will be allowed soon). The
+ default value is 15 if inflateInit is used instead. If a compressed stream
+ with a larger window size is given as input, inflate() will return with
+ the error code Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ If next_out is not null, the library will use this buffer for the history
+ buffer; the buffer must either be large enough to hold the entire output
+ data, or have at least 1<<windowBits bytes. If next_out is null, the
+ library will allocate its own buffer (and leave next_out null). next_in
+ need not be provided here but must be provided by the application for the
+ next call of inflate().
+
+ If the history buffer is provided by the application, next_out must
+ never be changed by the application since the decompressor maintains
+ history information inside this buffer from call to call; the application
+ can only reset next_out to the beginning of the history buffer when
+ avail_out is zero and all output has been consumed.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ windowBits < 8). msg is set to null if there is no error message.
+ inflateInit2 does not perform any decompression: this will be done by
+ inflate().
+*/
+
+extern int inflateSync OF((z_stream *strm));
+/*
+ Skips invalid compressed data until the special marker (see deflate()
+ above) can be found, or until all available input is skipped. No output
+ is provided.
+
+ inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no marker has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
+
+extern int inflateReset OF((z_stream *strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* checksum functions */
+
+/*
+ This function is not related to compression but is exported
+ anyway because it might be useful in applications using the
+ compression library.
+*/
+
+extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
+
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+
+#ifndef _Z_UTIL_H
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+#endif /* _ZLIB_H */
--- /dev/null
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+#
+# Fixed architecture capabilities: not user-visible choices, these
+# describe what the ppc64 port provides.
+define_bool CONFIG_UID16 n
+define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
+define_bool CONFIG_GENERIC_BUST_SPINLOCK n
+define_bool CONFIG_GENERIC_ISA_DMA y
+define_bool CONFIG_HAVE_DEC_LOCK y
+
+source init/Config.in
+
+mainmenu_option next_comment
+comment 'Platform support'
+# Exactly one platform type; pSeries is the default selection.
+choice 'Platform Type' \
+ "iSeries CONFIG_PPC_ISERIES \
+ pSeries CONFIG_PPC_PSERIES " pSeries
+define_bool CONFIG_PPC y
+define_bool CONFIG_PPC64 y
+
+bool 'Symmetric multi-processing support' CONFIG_SMP
+if [ "$CONFIG_SMP" = "y" ]; then
+ bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
+ if [ "$CONFIG_PPC_PSERIES" = "y" ]; then
+ bool ' Hardware multithreading' CONFIG_HMT
+ bool ' PCI Enhanced Error Handling' CONFIG_PPC_EEH
+ fi
+fi
+define_bool CONFIG_PREEMPT n
+
+# MsChunks is forced on for iSeries and optional elsewhere.
+if [ "$CONFIG_PPC_ISERIES" = "y" ]; then
+ define_bool CONFIG_MSCHUNKS y
+else
+bool 'MsChunks Physical to Absolute address translation support' CONFIG_MSCHUNKS
+fi
+
+endmenu
+
+mainmenu_option next_comment
+comment 'General setup'
+
+# Bus types: only PCI exists on this architecture.
+define_bool CONFIG_ISA n
+define_bool CONFIG_SBUS n
+define_bool CONFIG_MCA n
+define_bool CONFIG_EISA n
+define_bool CONFIG_PCI y
+
+# only elf supported, a.out is not -- Cort
+if [ "$CONFIG_PROC_FS" = "y" ]; then
+ define_bool CONFIG_KCORE_ELF y
+fi
+
+bool 'Kernel Support for 64 bit ELF binaries' CONFIG_BINFMT_ELF
+
+tristate 'Kernel support for 32 bit binaries' CONFIG_BINFMT_ELF32
+
+tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
+
+source drivers/pci/Config.in
+
+bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
+
+if [ "$CONFIG_HOTPLUG" = "y" ]; then
+ source drivers/pcmcia/Config.in
+else
+ define_bool CONFIG_PCMCIA n
+fi
+
+source drivers/parport/Config.in
+
+# The /proc device-tree option is not offered on iSeries.
+if [ "$CONFIG_PPC_ISERIES" != "y" ]; then
+ bool 'Support for Open Firmware device tree in /proc' CONFIG_PROC_DEVICETREE
+fi
+
+endmenu
+
+source drivers/mtd/Config.in
+source drivers/pnp/Config.in
+source drivers/block/Config.in
+source drivers/md/Config.in
+
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
+fi
+
+mainmenu_option next_comment
+comment 'ATA/IDE/MFM/RLL support'
+
+tristate 'ATA/IDE/MFM/RLL support' CONFIG_IDE
+
+# With IDE disabled, pin the dependent symbols to n explicitly.
+if [ "$CONFIG_IDE" != "n" ]; then
+ source drivers/ide/Config.in
+else
+ define_bool CONFIG_BLK_DEV_IDE_MODES n
+ define_bool CONFIG_BLK_DEV_HD n
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'SCSI support'
+tristate 'SCSI support' CONFIG_SCSI
+if [ "$CONFIG_SCSI" != "n" ]; then
+ source drivers/scsi/Config.in
+fi
+endmenu
+
+source drivers/ieee1394/Config.in
+
+if [ "$CONFIG_NET" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Network device support'
+
+ bool 'Network device support' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
+ source drivers/net/Config.in
+ if [ "$CONFIG_ATM" = "y" ]; then
+ source drivers/atm/Config.in
+ fi
+ fi
+ endmenu
+fi
+
+source net/ax25/Config.in
+
+source net/irda/Config.in
+
+mainmenu_option next_comment
+comment 'ISDN subsystem'
+
+tristate 'ISDN support' CONFIG_ISDN
+if [ "$CONFIG_ISDN" != "n" ]; then
+ source drivers/isdn/Config.in
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'Old CD-ROM drivers (not SCSI, not IDE)'
+
+bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
+if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
+ source drivers/cdrom/Config.in
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'Console drivers'
+source drivers/video/Config.in
+endmenu
+
+
+# Virtual (hypervisor-backed) device drivers, offered only on iSeries.
+if [ "$CONFIG_PPC_ISERIES" = "y" ]; then
+mainmenu_option next_comment
+comment 'iSeries device drivers'
+ dep_tristate 'iSeries Virtual Console Support' CONFIG_VIOCONS $CONFIG_PPC_ISERIES
+ dep_tristate 'iSeries Virtual I/O disk support' CONFIG_VIODASD $CONFIG_PPC_ISERIES
+ if [ "$CONFIG_VIODASD" = "y" -o "$CONFIG_VIODASD" = "m" ]; then
+ bool 'iSeries Virtual disk IDE emulation' CONFIG_VIODASD_IDE
+ fi
+ dep_tristate 'iSeries Virtual I/O CD support' CONFIG_VIOCD $CONFIG_PPC_ISERIES
+ if [ "$CONFIG_VIOCD" = "y" -o "$CONFIG_VIOCD" = "m" ]; then
+ bool 'iSeries Virtual CD Aztech emulation' CONFIG_VIOCD_AZTECH
+ fi
+ dep_tristate 'iSeries Virtual Tape Support' CONFIG_VIOTAPE $CONFIG_PPC_ISERIES
+ dep_tristate 'iSeries Virtual Ethernet driver support' CONFIG_VETH $CONFIG_PPC_ISERIES
+endmenu
+fi
+
+# Derive CONFIG_VIOPATH: it is set to n only when VIOCONS, VIODASD,
+# VIOTAPE and VIOCD are all explicitly "n"; otherwise y.
+# NOTE(review): options never offered (non-iSeries configs) expand to
+# "", not "n", so they also fall through to the else branches and
+# yield CONFIG_VIOPATH=y -- confirm this is intended.
+if [ "$CONFIG_VIOCONS" = "n" ]; then
+ if [ "$CONFIG_VIODASD" = "n" ]; then
+ if [ "$CONFIG_VIOTAPE" = "n" ]; then
+ if [ "$CONFIG_VIOCD" = "n" ]; then
+ define_bool CONFIG_VIOPATH n
+ else
+ define_bool CONFIG_VIOPATH y
+ fi
+ else
+ define_bool CONFIG_VIOPATH y
+ fi
+ else
+ define_bool CONFIG_VIOPATH y
+ fi
+else
+ define_bool CONFIG_VIOPATH y
+fi
+
+# Building the virtual CD driver in forces on the old CD-ROM layer option.
+if [ "$CONFIG_VIOCD" = "y" ]; then
+ define_bool CONFIG_CD_NO_IDESCSI y
+fi
+
+source drivers/char/Config.in
+source fs/Config.in
+
+mainmenu_option next_comment
+comment 'Sound'
+tristate 'Sound card support' CONFIG_SOUND
+if [ "$CONFIG_SOUND" != "n" ]; then
+ source sound/Config.in
+fi
+
+endmenu
+
+source drivers/usb/Config.in
+
+mainmenu_option next_comment
+comment 'Kernel hacking'
+
+bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
+bool 'Include kgdb kernel debugger' CONFIG_KGDB
+bool 'Include xmon kernel debugger' CONFIG_XMON
+bool 'Include PPCDBG realtime debugging' CONFIG_PPCDBG
+endmenu
+
+source lib/Config.in
--- /dev/null
+#
+# Automatically generated make config: don't edit
+#
+# CONFIG_UID16 is not set
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_GENERIC_BUST_SPINLOCK is not set
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_HAVE_DEC_LOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# General setup
+#
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_KMOD is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_ISERIES is not set
+CONFIG_PPC_PSERIES=y
+CONFIG_PPC=y
+CONFIG_PPC64=y
+CONFIG_SMP=y
+CONFIG_IRQ_ALL_CPUS=y
+# CONFIG_HMT is not set
+# CONFIG_PPC_EEH is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_MSCHUNKS is not set
+
+#
+# General setup
+#
+# CONFIG_ISA is not set
+# CONFIG_SBUS is not set
+# CONFIG_MCA is not set
+# CONFIG_EISA is not set
+CONFIG_PCI=y
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_ELF32=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_PCI_NAMES=y
+# CONFIG_HOTPLUG is not set
+# CONFIG_PCMCIA is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+CONFIG_PROC_DEVICETREE=y
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Plug and Play configuration
+#
+# CONFIG_PNP is not set
+# CONFIG_ISAPNP is not set
+# CONFIG_PNPBIOS is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_CISS_SCSI_TAPE is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_MD_LINEAR is not set
+# CONFIG_MD_RAID0 is not set
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID5 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_BLK_DEV_LVM is not set
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+# CONFIG_NETFILTER is not set
+CONFIG_FILTER=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=y
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_IPV6=m
+# CONFIG_KHTTPD is not set
+# CONFIG_ATM is not set
+# CONFIG_VLAN_8021Q is not set
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# ATA/IDE/MFM/RLL support
+#
+# CONFIG_IDE is not set
+# CONFIG_BLK_DEV_IDE_MODES is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_SR_EXTRA_DEVS=2
+CONFIG_CHR_DEV_SG=y
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_MESH is not set
+# CONFIG_SCSI_MAC53C94 is not set
+
+#
+# IEEE 1394 (FireWire) support (EXPERIMENTAL)
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MACE is not set
+# CONFIG_BMAC is not set
+# CONFIG_OAKNET is not set
+# CONFIG_SUNLANCE is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNBMAC is not set
+# CONFIG_SUNQE is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_ELMC is not set
+# CONFIG_ELMC_II is not set
+CONFIG_VORTEX=y
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_APRICOT is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_DE2104X is not set
+# CONFIG_TULIP is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_DGRS is not set
+# CONFIG_DM9102 is not set
+CONFIG_EEPRO100=y
+# CONFIG_LNE390 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_NE3210 is not set
+# CONFIG_ES3210 is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_NEW_RX_RESET is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_RHINE_MMIO is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=y
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+# CONFIG_DL2K is not set
+# CONFIG_MYRI_SBUS is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Token Ring devices
+#
+CONFIG_TR=y
+CONFIG_IBMOL=y
+# CONFIG_IBMLS is not set
+# CONFIG_TMS380TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Console drivers
+#
+
+#
+# Frame-buffer support
+#
+CONFIG_FB=y
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_CLGEN is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+CONFIG_FB_OF=y
+# CONFIG_FB_CONTROL is not set
+# CONFIG_FB_PLATINUM is not set
+# CONFIG_FB_VALKYRIE is not set
+# CONFIG_FB_CT65550 is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_S3TRIO is not set
+# CONFIG_FB_VGA16 is not set
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G100=y
+# CONFIG_FB_MATROX_G450 is not set
+# CONFIG_FB_MATROX_MULTIHEAD is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FBCON_ADVANCED is not set
+CONFIG_FBCON_CFB8=y
+CONFIG_FBCON_CFB16=y
+CONFIG_FBCON_CFB24=y
+CONFIG_FBCON_CFB32=y
+CONFIG_FBCON_FONTWIDTH8_ONLY=y
+CONFIG_FBCON_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_VIOPATH=y
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
+# CONFIG_SERIAL_EXTENDED is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+CONFIG_HVC_CONSOLE=y
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+CONFIG_MOUSE=y
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_INTEL_RNG is not set
+CONFIG_NVRAM=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+
+#
+# File systems
+#
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BFS_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_UMSDOS_FS is not set
+CONFIG_VFAT_FS=y
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_TMPFS is not set
+CONFIG_RAMFS=y
+CONFIG_ISO9660_FS=y
+# CONFIG_JOLIET is not set
+# CONFIG_ZISOFS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_ROOT_NFS is not set
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_SMB_FS=y
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+# CONFIG_ZISOFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_SMB_NLS=y
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI is not set
+# CONFIG_USB_UHCI_ALT is not set
+# CONFIG_USB_OHCI is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_BLUETOOTH is not set
+# CONFIG_USB_STORAGE is not set
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_HP8200e is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# USB Human Interface Devices (HID)
+#
+# CONFIG_USB_HID is not set
+# CONFIG_USB_HIDDEV is not set
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_WACOM is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_DC2XX is not set
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_SCANNER is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_HPUSBSCSI is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_STV680 is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_DABUSB is not set
+# CONFIG_USB_KONICAWC is not set
+
+#
+# USB Network adaptors
+#
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_CDCETHER is not set
+# CONFIG_USB_USBNET is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_USS720 is not set
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_AUERSWALD is not set
+
+#
+# Kernel hacking
+#
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_KGDB is not set
+CONFIG_XMON=y
+# CONFIG_PPCDBG is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+# CONFIG_ZLIB_INFLATE is not set
+# CONFIG_ZLIB_DEFLATE is not set
--- /dev/null
+/*
+ * HvCall.c
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/iSeries/HvCall.h>
+#ifndef _HVCALLSC_H
+#include <asm/iSeries/HvCallSc.h>
+#endif
+#include <asm/iSeries/LparData.h>
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+
+/*=====================================================================
+ * Note that this call takes at MOST one page worth of data
+ */
+/* Read up to bufLen bytes of the partition lpIndex's log into buffer.
+ * Returns the hypervisor return value of HvCall3, truncated to int.
+ */
+int HvCall_readLogBuffer(HvLpIndex lpIndex, void *buffer, u64 bufLen)
+{
+ struct HvLpBufferList *bufList;
+ u64 bytesLeft = bufLen;
+ u64 leftThisPage;
+ u64 curPtr = virt_to_absolute( (unsigned long) buffer );
+ u64 retVal;
+ int npages;
+ int i;
+
+ /* First pass: count how many pages the buffer spans. */
+ npages = 0;
+ while (bytesLeft) {
+ npages++;
+ /* Bytes from curPtr to the end of its page */
+ leftThisPage = ((curPtr & PAGE_MASK) + PAGE_SIZE) - curPtr;
+
+ if (leftThisPage > bytesLeft)
+ bytesLeft = 0;
+ else
+ bytesLeft -= leftThisPage;
+
+ /* Step to the start of the next page */
+ curPtr = (curPtr & PAGE_MASK) + PAGE_SIZE;
+ }
+
+ if (npages == 0)
+ return 0;
+
+ /* NOTE(review): the kmalloc return value is not checked; a failed
+  * GFP_ATOMIC allocation would be dereferenced below -- confirm a
+  * NULL check should be added.
+  */
+ bufList = (struct HvLpBufferList *)
+ kmalloc(npages * sizeof(struct HvLpBufferList), GFP_ATOMIC);
+ /* Second pass: fill in one (addr, len) entry per page. */
+ bytesLeft = bufLen;
+ curPtr = virt_to_absolute( (unsigned long) buffer );
+ for(i=0; i<npages; i++) {
+ bufList[i].addr = curPtr;
+
+ leftThisPage = ((curPtr & PAGE_MASK) + PAGE_SIZE) - curPtr;
+
+ if (leftThisPage > bytesLeft) {
+ bufList[i].len = bytesLeft;
+ bytesLeft = 0;
+ } else {
+ bufList[i].len = leftThisPage;
+ bytesLeft -= leftThisPage;
+ }
+
+ curPtr = (curPtr & PAGE_MASK) + PAGE_SIZE;
+ }
+
+
+ /* Pass the absolute address of the scatter list to the hypervisor */
+ retVal = HvCall3(HvCallBaseReadLogBuffer, lpIndex,
+ virt_to_absolute((unsigned long)bufList), bufLen);
+
+ kfree(bufList);
+
+ return (int)retVal;
+}
+
+/*=====================================================================
+ * Write bufLen bytes at buffer to the hypervisor log.
+ * NOTE(review): bufList is a single (addr, len) entry that the loop
+ * overwrites each iteration, so for a buffer spanning more than one
+ * page only the final page's entry reaches HvCall2 while the full
+ * bufLen is still passed -- confirm callers never cross a page
+ * boundary, or build a list as HvCall_readLogBuffer does.
+ */
+void HvCall_writeLogBuffer(const void *buffer, u64 bufLen)
+{
+ struct HvLpBufferList bufList;
+ u64 bytesLeft = bufLen;
+ u64 leftThisPage;
+ u64 curPtr = virt_to_absolute( (unsigned long) buffer );
+
+ while (bytesLeft) {
+ bufList.addr = curPtr;
+
+ /* Bytes from curPtr to the end of its page */
+ leftThisPage = ((curPtr & PAGE_MASK) + PAGE_SIZE) - curPtr;
+
+ if (leftThisPage > bytesLeft) {
+ bufList.len = bytesLeft;
+ bytesLeft = 0;
+ } else {
+ bufList.len = leftThisPage;
+ bytesLeft -= leftThisPage;
+ }
+
+ curPtr = (curPtr & PAGE_MASK) + PAGE_SIZE;
+ }
+
+
+ HvCall2(HvCallBaseWriteLogBuffer,
+ virt_to_absolute((unsigned long)&bufList), bufLen);
+
+}
--- /dev/null
+/*
+ * HvLpConfig.c
+ * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _HVLPCONFIG_H
+#include <asm/iSeries/HvLpConfig.h>
+#endif
+
+/* Out-of-line wrapper around HvLpConfig_getLpIndex(); presumably
+ * provides a callable/linkable entry point for the inline -- TODO
+ * confirm against callers.
+ */
+HvLpIndex HvLpConfig_getLpIndex_outline(void)
+{
+ return HvLpConfig_getLpIndex();
+}
+
--- /dev/null
+/*
+ * Copyright 2001 Mike Corrigan IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iSeries/HvCallEvent.h>
+#include <asm/iSeries/LparData.h>
+
+/* Array of LpEvent handler functions */
+LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
+unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
+
+/* Register a handler for an LpEvent type */
+
+/* Install handler for eventType.
+ * Returns 0 on success, 1 if eventType is out of range.
+ */
+int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
+{
+ int rc = 1;
+ if ( eventType < HvLpEvent_Type_NumTypes ) {
+ lpEventHandler[eventType] = handler;
+ rc = 0;
+ }
+ return rc;
+
+}
+
+/* Remove the handler for eventType.
+ * Returns 0 on success; 1 if eventType is out of range or if any
+ * event paths are still open for it.
+ */
+int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
+{
+ int rc = 1;
+ if ( eventType < HvLpEvent_Type_NumTypes ) {
+ /* Refuse while open paths still reference this handler */
+ if ( !lpEventHandlerPaths[eventType] ) {
+ lpEventHandler[eventType] = NULL;
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+/* (lpIndex is the partition index of the target partition.
+ * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
+ * indicates to use our partition index - for the other types)
+ */
+/* Open an event path of eventType to partition lpIndex.
+ * Returns 0 on success; 1 if eventType is out of range or no handler
+ * is registered for it.
+ */
+int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
+{
+ int rc = 1;
+ /* A handler must be registered before a path may be opened */
+ if ( eventType < HvLpEvent_Type_NumTypes &&
+ lpEventHandler[eventType] ) {
+ /* Zero means "our own partition index" (see comment above) */
+ if ( lpIndex == 0 )
+ lpIndex = itLpNaca.xLpIndex;
+ HvCallEvent_openLpEventPath( lpIndex, eventType );
+ /* Count open paths so the handler cannot be unregistered */
+ ++lpEventHandlerPaths[eventType];
+ rc = 0;
+ }
+ return rc;
+}
+
+/* Close an event path of eventType to partition lpIndex.
+ * Returns 0 on success; 1 if eventType is out of range, no handler
+ * is registered, or no path is open.
+ */
+int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
+{
+ int rc = 1;
+ if ( eventType < HvLpEvent_Type_NumTypes &&
+ lpEventHandler[eventType] &&
+ lpEventHandlerPaths[eventType] ) {
+ /* Zero means "our own partition index" */
+ if ( lpIndex == 0 )
+ lpIndex = itLpNaca.xLpIndex;
+ HvCallEvent_closeLpEventPath( lpIndex, eventType );
+ --lpEventHandlerPaths[eventType];
+ rc = 0;
+ }
+ return rc;
+}
+
--- /dev/null
+/*
+ * ItLpQueue.c
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/system.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iSeries/HvCallEvent.h>
+#include <asm/iSeries/LparData.h>
+
+/* Atomically claim the queue's in-use word via lwarx/stwcx.
+ * Returns 1 if this caller took the word from 0 to 1; returns 0 if
+ * the queue was already in use (xInUseWord nonzero).
+ */
+static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
+{
+ int t;
+ u32 * inUseP = &(lpQueue->xInUseWord);
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%2 \n\
+ cmpi 0,%0,0 \n\
+ li %0,0 \n\
+ bne- 2f \n\
+ addi %0,%0,1 \n\
+ stwcx. %0,0,%2 \n\
+ bne- 1b \n\
+2: eieio"
+ : "=&r" (t), "=m" (lpQueue->xInUseWord)
+ : "r" (inUseP), "m" (lpQueue->xInUseWord)
+ : "cc");
+
+ return t;
+}
+
+/* Release the queue claimed by set_inUse() */
+static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
+{
+ lpQueue->xInUseWord = 0;
+}
+
+/* Array of LpEvent handler functions */
+extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
+unsigned long ItLpQueueInProcess = 0;
+
+/* Return the event at the queue's current pointer if it is valid,
+ * advancing the pointer past it (size rounded up to LpEventAlign);
+ * return NULL when no valid event is pending.
+ */
+struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
+{
+ struct HvLpEvent * nextLpEvent =
+ (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+ if ( nextLpEvent->xFlags.xValid ) {
+ /* Set pointer to next potential event */
+ lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+ LpEventAlign ) /
+ LpEventAlign ) *
+ LpEventAlign;
+ /* Wrap to beginning if no room at end */
+ if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
+ lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
+ }
+ else
+ nextLpEvent = NULL;
+
+ return nextLpEvent;
+}
+
+/* Nonzero when a valid event is queued or a PLIC overflow interrupt
+ * is pending; 0 otherwise (including a NULL lpQueue).
+ */
+int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
+{
+ int retval = 0;
+ struct HvLpEvent * nextLpEvent;
+ if ( lpQueue ) {
+ nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+ retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
+ }
+ return retval;
+}
+
+void ItLpQueue_clearValid( struct HvLpEvent * event )
+{
+ /* Clear the valid bit of the event
+ * Also clear bits within this event that might
+ * look like valid bits (on 64-byte boundaries)
+ */
+ /* extra = number of additional LpEventAlign slots the event spans */
+ unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
+ LpEventAlign ) - 1;
+ /* Cases intentionally fall through: clear the pseudo-valid bit in
+ * every trailing 64-byte slot, highest first. */
+ switch ( extra ) {
+ case 3:
+ ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
+ /* fall through */
+ case 2:
+ ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
+ /* fall through */
+ case 1:
+ ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
+ /* fall through */
+ case 0:
+ ;
+ }
+ /* Order the trailing-slot clears before clearing the real valid
+ * bit, so the slot is never reusable while stale bits remain. */
+ mb();
+ event->xFlags.xValid = 0;
+}
+
+/* Drain the Lp event queue: dispatch every valid event to its
+ * registered handler, then pull any overflow events from the
+ * hypervisor and repeat until the queue is empty.  Returns the
+ * number of events processed (0 if the queue was already being
+ * processed by another context).  Not re-entrant: recursion is
+ * rejected via set_inUse() and double entry trips BUG().
+ */
+unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
+{
+ unsigned numIntsProcessed = 0;
+ struct HvLpEvent * nextLpEvent;
+
+ /* If we have recursed, just return */
+ if ( !set_inUse( lpQueue ) )
+ return 0;
+
+ if (ItLpQueueInProcess == 0)
+ ItLpQueueInProcess = 1;
+ else
+ BUG();
+
+ for (;;) {
+ nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
+ if ( nextLpEvent ) {
+ /* Count events to return to caller
+ * and count processed events in lpQueue
+ */
+ ++numIntsProcessed;
+ lpQueue->xLpIntCount++;
+ /* Call appropriate handler here, passing
+ * a pointer to the LpEvent. The handler
+ * must make a copy of the LpEvent if it
+ * needs it in a bottom half. (perhaps for
+ * an ACK)
+ *
+ * Handlers are responsible for ACK processing
+ *
+ * The Hypervisor guarantees that LpEvents will
+ * only be delivered with types that we have
+ * registered for, so no type check is necessary
+ * here!
+ */
+ if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
+ lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
+ if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
+ lpEventHandler[nextLpEvent->xType] )
+ lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
+ else
+ printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
+
+ /* Free the slot for reuse by the hypervisor */
+ ItLpQueue_clearValid( nextLpEvent );
+ }
+ else /* No more valid events
+ * If overflow events are pending
+ * process them
+ */
+ if ( lpQueue->xPlicOverflowIntPending ) {
+ HvCallEvent_getOverflowLpEvents(
+ lpQueue->xIndex);
+ }
+ else /* If nothing left then we are done */
+ break;
+ }
+
+ ItLpQueueInProcess = 0;
+ /* make all processing visible before releasing the queue */
+ mb();
+ clear_inUse( lpQueue );
+
+ get_paca()->lpEvent_count += numIntsProcessed;
+
+ return numIntsProcessed;
+}
--- /dev/null
+/*
+ * Copyright 2001 Mike Corrigan, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define __KERNEL__ 1
+#include <asm/types.h>
+#include <asm/page.h>
+#include <stddef.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/init.h>
+#include <asm/Naca.h>
+#include <asm/abs_addr.h>
+#include <asm/bitops.h>
+#include <asm/iSeries/ItLpNaca.h>
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/iSeries/ItLpRegSave.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/HvReleaseData.h>
+#include <asm/iSeries/LparMap.h>
+#include <asm/iSeries/ItVpdAreas.h>
+#include <asm/iSeries/ItIplParmsReal.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/IoHriProcessorVpd.h>
+#include <asm/iSeries/ItSpCommArea.h>
+
+extern char _start_boltedStacks[];
+
+/* The LparMap data is now located at offset 0x6000 in head.S
+ * It was put there so that the HvReleaseData could address it
+ * with a 32-bit offset as required by the iSeries hypervisor
+ *
+ * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
+ * the Naca via the HvReleaseData area. The HvReleaseData has the
+ * offset into the Naca of the pointer to the ItVpdAreas.
+ */
+
+extern struct ItVpdAreas itVpdAreas;
+
+/* The LpQueue is used to pass event data from the hypervisor to
+ * the partition. This is where I/O interrupt events are communicated.
+ * The ItLpQueue must be initialized (even though only to all zeros)
+ * If it were uninitialized (in .bss) it would get zeroed after the
+ * kernel gets control. The hypervisor will have filled in some fields
+ * before the kernel gets control. By initializing it we keep it out
+ * of the .bss
+ */
+
+/* Empty initializer is deliberate: it forces .data placement (see above). */
+struct ItLpQueue xItLpQueue = {};
+
+
+/* The HvReleaseData is the root of the information shared between
+ * the hypervisor and Linux.
+ */
+
+/* Positional initializer; the eyecatcher and version text are EBCDIC
+ * because the hypervisor side reads them. */
+struct HvReleaseData hvReleaseData = {
+ 0xc8a5d9c4, /* desc = "HvRD" ebcdic */
+ sizeof(struct HvReleaseData),
+ offsetof(struct Naca, xItVpdAreas),
+ (struct Naca *)(KERNELBASE+0x4000), /* 64-bit Naca address */
+ 0x6000, /* offset of LparMap within loadarea (see head.S) */
+ 0,
+ 1, /* tags inactive */
+ 0, /* 64 bit */
+ 0, /* shared processors */
+ 0, /* HMT allowed */
+ 6, /* TEMP: This allows non-GA driver */
+ 4, /* We are v5r2m0 */
+ 3, /* Min supported PLIC = v5r1m0 */
+ 3, /* Min usuable PLIC = v5r1m0 */
+ { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4 "*/
+ 0xa7, 0x40, 0xf2, 0x4b,
+ 0xf4, 0x4b, 0xf6, 0xf4 },
+ {0}
+};
+
+extern void SystemReset_Iseries(void);
+extern void MachineCheck_Iseries(void);
+extern void DataAccess_Iseries(void);
+extern void InstructionAccess_Iseries(void);
+extern void HardwareInterrupt_Iseries(void);
+extern void Alignment_Iseries(void);
+extern void ProgramCheck_Iseries(void);
+extern void FPUnavailable_Iseries(void);
+extern void Decrementer_Iseries(void);
+extern void Trap_0a_Iseries(void);
+extern void Trap_0b_Iseries(void);
+extern void SystemCall_Iseries(void);
+extern void SingleStep_Iseries(void);
+extern void Trap_0e_Iseries(void);
+extern void PerformanceMonitor_Iseries(void);
+extern void DataAccessSLB_Iseries(void);
+extern void InstructionAccessSLB_Iseries(void);
+
+/* LP Naca shared with the hypervisor.  The final array maps each
+ * architected interrupt vector to its iSeries entry point in head.S;
+ * slots left 0 are vectors this partition does not hook. */
+struct ItLpNaca itLpNaca = {
+ 0xd397d581, /* desc = "LpNa" ebcdic */
+ 0x0400, /* size of ItLpNaca */
+ 0x0300, 19, /* offset to int array, # ents */
+ 0, 0, 0, /* Part # of primary, serv, me */
+ 0, 0x100, /* # of LP queues, offset */
+ 0, 0, 0, /* Piranha stuff */
+ { 0,0,0,0,0 }, /* reserved */
+ 0,0,0,0,0,0,0, /* stuff */
+ { 0,0,0,0,0 }, /* reserved */
+ 0, /* reserved */
+ 0, /* VRM index of PLIC */
+ 0, 0, /* min supported, compat SLIC */
+ 0, /* 64-bit addr of load area */
+ 0, /* chunks for load area */
+ 0, 0, /* PASE mask, seg table */
+ { 0 }, /* 64 reserved bytes */
+ { 0 }, /* 128 reserved bytes */
+ { 0 }, /* Old LP Queue */
+ { 0 }, /* 384 reserved bytes */
+ {
+ (u64)SystemReset_Iseries, /* 0x100 System Reset */
+ (u64)MachineCheck_Iseries, /* 0x200 Machine Check */
+ (u64)DataAccess_Iseries, /* 0x300 Data Access */
+ (u64)InstructionAccess_Iseries, /* 0x400 Instruction Access */
+ (u64)HardwareInterrupt_Iseries, /* 0x500 External */
+ (u64)Alignment_Iseries, /* 0x600 Alignment */
+ (u64)ProgramCheck_Iseries, /* 0x700 Program Check */
+ (u64)FPUnavailable_Iseries, /* 0x800 FP Unavailable */
+ (u64)Decrementer_Iseries, /* 0x900 Decrementer */
+ (u64)Trap_0a_Iseries, /* 0xa00 Trap 0A */
+ (u64)Trap_0b_Iseries, /* 0xb00 Trap 0B */
+ (u64)SystemCall_Iseries, /* 0xc00 System Call */
+ (u64)SingleStep_Iseries, /* 0xd00 Single Step */
+ (u64)Trap_0e_Iseries, /* 0xe00 Trap 0E */
+ (u64)PerformanceMonitor_Iseries,/* 0xf00 Performance Monitor */
+ 0, /* int 0x1000 */
+ 0, /* int 0x1010 */
+ 0, /* int 0x1020 CPU ctls */
+ (u64)HardwareInterrupt_Iseries, /* SC Ret Hdlr */
+ (u64)DataAccessSLB_Iseries, /* 0x380 D-SLB */
+ (u64)InstructionAccessSLB_Iseries /* 0x480 I-SLB */
+ }
+};
+
+/* Explicitly initialized (even to zeros) so these live in .data and
+ * survive BSS clearing after the hypervisor has written to them;
+ * same reasoning as xItLpQueue above. */
+struct ItIplParmsReal xItIplParmsReal = {};
+
+/* Per-processor VPD defaults; uses GNU old-style `field:' designated
+ * initializers.  Values here are placeholders the hypervisor/firmware
+ * is expected to overwrite -- TODO confirm against IoHri docs. */
+struct IoHriProcessorVpd xIoHriProcessorVpd[maxProcessors] = {
+ {
+ xInstCacheOperandSize: 32,
+ xDataCacheOperandSize: 32,
+ xProcFreq: 50000000,
+ xTimeBaseFreq: 50000000,
+ xPVR: 0x3600
+ }
+};
+
+
+u64 xMsVpd[3400] = {}; /* Space for Main Store Vpd 27,200 bytes */
+
+u64 xRecoveryLogBuffer[32] = {}; /* Space for Recovery Log Buffer */
+
+/* Service-processor communication area; leading word looks like an
+ * EBCDIC eyecatcher ("SPCB") -- verify against ItSpCommArea.h. */
+struct SpCommArea xSpCommArea = {
+ 0xE2D7C3C2,
+ 1,
+ {0},
+ 0, 0, 0, 0, {0}
+};
+
+/* Master VPD directory handed to the hypervisor: parallel arrays of
+ * lengths and addresses, indexed identically (e.g. slot 7 = MS VPD,
+ * slot 23 = Lp Queue).  The length in slot 7 (26992) must not exceed
+ * sizeof(xMsVpd) above. */
+struct ItVpdAreas itVpdAreas = {
+ 0xc9a3e5c1, /* "ItVA" */
+ sizeof( struct ItVpdAreas ),
+ 0, 0,
+ 26, /* # VPD array entries */
+ 10, /* # DMA array entries */
+ maxProcessors*2, maxProcessors, /* Max logical, physical procs */
+ offsetof(struct ItVpdAreas,xPlicDmaToks),/* offset to DMA toks */
+ offsetof(struct ItVpdAreas,xSlicVpdAdrs),/* offset to VPD addrs */
+ offsetof(struct ItVpdAreas,xPlicDmaLens),/* offset to DMA lens */
+ offsetof(struct ItVpdAreas,xSlicVpdLens),/* offset to VPD lens */
+ 0, /* max slot labels */
+ 1, /* max LP queues */
+ {0}, {0}, /* reserved */
+ {0}, /* DMA lengths */
+ {0}, /* DMA tokens */
+ { /* VPD lengths */
+ 0,0,0,0, /* 0 - 3 */
+ sizeof(struct Paca), /* 4 length of Paca */
+ 0, /* 5 */
+ sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
+ 26992, /* 7 length of MS VPD */
+ 0, /* 8 */
+ sizeof(struct ItLpNaca),/* 9 length of LP Naca */
+ 0, /* 10 */
+ 256, /* 11 length of Recovery Log Buf */
+ sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
+ 0,0,0, /* 13 - 15 */
+ sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
+ 0,0,0,0,0,0, /* 17 - 22 */
+ sizeof(struct ItLpQueue),/* 23 length of Lp Queue */
+ 0,0 /* 24 - 25 */
+ },
+ { /* VPD addresses */
+ 0,0,0,0, /* 0 - 3 */
+ &xPaca[0], /* 4 first Paca */
+ 0, /* 5 */
+ &xItIplParmsReal, /* 6 IPL parms */
+ &xMsVpd, /* 7 MS Vpd */
+ 0, /* 8 */
+ &itLpNaca, /* 9 LpNaca */
+ 0, /* 10 */
+ &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
+ &xSpCommArea, /* 12 SP Comm Area */
+ 0,0,0, /* 13 - 15 */
+ &xIoHriProcessorVpd, /* 16 Proc Vpd */
+ 0,0,0,0,0,0, /* 17 - 22 */
+ &xItLpQueue, /* 23 Lp Queue */
+ 0,0
+ }
+};
+
+
+/* Data area used in flush_hash_page */
+long long flush_hash_page_hpte[2];
+
+/* Memory-chunk map; fields are filled in by msChunks_alloc() below. */
+struct msChunks msChunks = {0, 0, 0, 0, NULL};
+
+/* Depending on whether this is called from iSeries or pSeries setup
+ * code, the location of the msChunks struct may or may not have
+ * to be reloc'd, so we force the caller to do that for us by passing
+ * in a pointer to the structure.
+ */
+/* Reserve room for the chunk absolute-address array at the early
+ * allocation watermark `mem', record the chunk geometry in msChunks,
+ * and return the advanced watermark. */
+unsigned long
+msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
+{
+ unsigned long reloc = reloc_offset();
+ struct msChunks *chunks = PTRRELOC(&msChunks);
+ unsigned long shift = __ilog2(chunk_size);
+
+ chunks->num_chunks = num_chunks;
+ chunks->chunk_size = chunk_size;
+ chunks->chunk_shift = shift;
+ chunks->chunk_mask = (1UL << shift) - 1;
+
+ /* place the entry array at the next aligned boundary */
+ mem = _ALIGN(mem, sizeof(msChunks_entry));
+ chunks->abs = (msChunks_entry *)(mem + reloc);
+ mem += num_chunks * sizeof(msChunks_entry);
+
+ return mem;
+}
+
+
+
+
--- /dev/null
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+USE_STANDARD_AS_RULE := true
+
+# This file is compiled with the full TOC model (overrides the
+# -mminimal-toc set arch-wide in arch/ppc64/Makefile).
+EXTRA_CFLAGS = -mno-minimal-toc
+
+KHEAD := head.o
+
+all: $(KHEAD) kernel.o
+
+O_TARGET := kernel.o
+
+export-objs := ppc_ksyms.o setup.o
+
+obj-y := ppc_ksyms.o setup.o entry.o traps.o irq.o idle.o \
+ time.o process.o signal.o syscalls.o misc.o ptrace.o \
+ align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
+ LparData.o udbg.o binfmt_elf32.o sys_ppc32.o sys32.o \
+ ioctl32.o ptrace32.o signal32.o open_pic.o xics.o \
+ pmc.o mf_proc.o proc_pmc.o iSeries_setup.o \
+ ItLpQueue.o hvCall.o mf.o HvLpEvent.o ras.o \
+ iSeries_proc.o HvCall.o HvLpConfig.o \
+ rtc.o init_task.o
+
+obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_dma.o
+obj-$(CONFIG_PPC_EEH) += eeh.o
+
+ifeq ($(CONFIG_PPC_ISERIES),y)
+obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o iSeries_IoMmTable.o iSeries_irq.o iSeries_VpdInfo.o XmPciLpEvent.o
+endif
+ifeq ($(CONFIG_PPC_PSERIES),y)
+obj-$(CONFIG_PCI) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o
+
+obj-y += rtasd.o
+endif
+
+obj-$(CONFIG_KGDB) += ppc-stub.o
+
+obj-$(CONFIG_SMP) += smp.o
+
+# tibit: for matrox_init2()
+# NOTE(review): the ifeq wrapper is redundant -- obj-$(CONFIG_NVRAM)
+# already expands to obj-y only when CONFIG_NVRAM=y.
+ifeq ($(CONFIG_NVRAM),y)
+ obj-$(CONFIG_NVRAM) += pmac_nvram.o
+endif
+
+obj-y += prom.o lmb.o rtas.o rtas-proc.o chrp_setup.o i8259.o
+
+include $(TOPDIR)/Rules.make
+
+#
+# This is just to get the dependencies...
+#
+
+head.o: head.S ppc_defs.h
+
+# ppc_defs.h is generated: compile mk_defs.c to assembly and harvest
+# the #define lines it emits, appended to the ppc_defs.head template.
+ppc_defs.h: mk_defs.c ppc_defs.head \
+ $(TOPDIR)/include/asm/mmu.h \
+ $(TOPDIR)/include/asm/processor.h \
+ $(TOPDIR)/include/asm/pgtable.h \
+ $(TOPDIR)/include/asm/ptrace.h
+ $(CC) $(CFLAGS) -S mk_defs.c
+ cp ppc_defs.head ppc_defs.h
+# for bk, this way we can write to the file even if it's not checked out
+ chmod u+w ppc_defs.h
+ grep '^#define' mk_defs.s >> ppc_defs.h
+ rm mk_defs.s
--- /dev/null
+/*
+ * File XmPciLpEvent.h created by Wayne Holm on Mon Jan 15 2001.
+ *
+ * This module handles PCI interrupt events sent by the iSeries Hypervisor.
+*/
+
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/blk.h>
+#include <linux/ide.h>
+
+#include <asm/iSeries/HvTypes.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iSeries/HvCallPci.h>
+#include <asm/iSeries/XmPciLpEvent.h>
+#include <asm/ppcdebug.h>
+
+// Counters exported for diagnostics: interrupts dispatched vs. total
+// events seen by the handler.
+long Pci_Interrupt_Count = 0;
+long Pci_Event_Count = 0;
+
+// Event subtype codes as delivered by the hypervisor; values are
+// fixed by the PLIC interface, hence the gaps.
+enum XmPciLpEvent_Subtype {
+ XmPciLpEvent_BusCreated = 0, // PHB has been created
+ XmPciLpEvent_BusFailed = 1, // PHB has failed
+ XmPciLpEvent_BusRecovered = 12, // PHB has been recovered
+ XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed
+ XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered
+ XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt
+};
+
+// Payload for bus-level (PHB) events.
+struct XmPciLpEvent_BusInterrupt {
+ HvBusNumber busNumber;
+ HvSubBusNumber subBusNumber;
+};
+
+// Payload for multi-adapter-bridge (node) events.
+struct XmPciLpEvent_NodeInterrupt {
+ HvBusNumber busNumber;
+ HvSubBusNumber subBusNumber;
+ HvAgentId deviceId;
+};
+
+// Full PCI Lp event: generic HvLpEvent header followed by a
+// subtype-discriminated payload (discriminant: hvLpEvent.xSubtype).
+struct XmPciLpEvent {
+ struct HvLpEvent hvLpEvent;
+
+ union {
+ u64 alignData; // Align on an 8-byte boundary
+
+ struct {
+ u32 fisr;
+ HvBusNumber busNumber;
+ HvSubBusNumber subBusNumber;
+ HvAgentId deviceId;
+ } slotInterrupt;
+
+ struct XmPciLpEvent_BusInterrupt busFailed;
+ struct XmPciLpEvent_BusInterrupt busRecovered;
+ struct XmPciLpEvent_BusInterrupt busCreated;
+
+ struct XmPciLpEvent_NodeInterrupt nodeFailed;
+ struct XmPciLpEvent_NodeInterrupt nodeRecovered;
+
+ } eventData;
+
+};
+
+static void intReceived(struct XmPciLpEvent* eventParm, struct pt_regs* regsParm);
+
+// Handler registered with HvLpEvent for HvLpEvent_Type_PciIo events.
+// Dispatches interrupt-function events to intReceived(); acks are
+// logged as unexpected since this driver never solicits them.
+// Fix: the unrecognized-type branch previously tested the undefined
+// identifier `event' (compile error); the parameter is `eventParm'.
+static void XmPciLpEvent_handler( struct HvLpEvent* eventParm, struct pt_regs* regsParm)
+{
+ //PPCDBG(PPCDBG_BUSWALK,"XmPciLpEvent_handler, type 0x%x\n",eventParm->xType );
+ ++Pci_Event_Count;
+
+ if (eventParm && eventParm->xType == HvLpEvent_Type_PciIo) {
+ switch( eventParm->xFlags.xFunction ) {
+ case HvLpEvent_Function_Int:
+ intReceived( (struct XmPciLpEvent*)eventParm, regsParm );
+ break;
+ case HvLpEvent_Function_Ack:
+ printk(KERN_ERR "XmPciLpEvent.c: unexpected ack received\n");
+ break;
+ default:
+ printk(KERN_ERR "XmPciLpEvent.c: unexpected event function %d\n",(int)eventParm->xFlags.xFunction);
+ break;
+ }
+ }
+ else if (eventParm) {
+ // non-NULL but not a PciIo event: safe to dereference for the log
+ printk(KERN_ERR "XmPciLpEvent.c: Unrecognized PCI event type 0x%x\n",(int)eventParm->xType);
+ }
+ else {
+ printk(KERN_ERR "XmPciLpEvent.c: NULL event received\n");
+ }
+}
+
+// Process one interrupt-function PCI Lp event.  Slot interrupts carry
+// the Linux irq number in the event's correlation token (set when the
+// irq was opened); after dispatching the handlers the slot is EOI'd
+// back to the hypervisor.  Recovery subtypes are only logged for now.
+static void intReceived(struct XmPciLpEvent* eventParm, struct pt_regs* regsParm)
+{
+ int irq;
+
+ ++Pci_Interrupt_Count;
+ //PPCDBG(PPCDBG_BUSWALK,"PCI: XmPciLpEvent.c: intReceived\n");
+
+ switch (eventParm->hvLpEvent.xSubtype) {
+ case XmPciLpEvent_SlotInterrupt:
+ irq = eventParm->hvLpEvent.xCorrelationToken;
+ /* Dispatch the interrupt handlers for this irq */
+ ppc_irq_dispatch_handler(regsParm, irq);
+ HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
+ eventParm->eventData.slotInterrupt.subBusNumber,
+ eventParm->eventData.slotInterrupt.deviceId);
+ break;
+ /* Ignore error recovery events for now */
+ case XmPciLpEvent_BusCreated:
+ printk(KERN_INFO "XmPciLpEvent.c: system bus %d created\n", eventParm->eventData.busCreated.busNumber);
+ break;
+ case XmPciLpEvent_BusFailed:
+ printk(KERN_INFO "XmPciLpEvent.c: system bus %d failed\n", eventParm->eventData.busFailed.busNumber);
+ break;
+ case XmPciLpEvent_BusRecovered:
+ printk(KERN_INFO "XmPciLpEvent.c: system bus %d recovered\n", eventParm->eventData.busRecovered.busNumber);
+ break;
+ case XmPciLpEvent_NodeFailed:
+ printk(KERN_INFO "XmPciLpEvent.c: multi-adapter bridge %d/%d/%d failed\n", eventParm->eventData.nodeFailed.busNumber, eventParm->eventData.nodeFailed.subBusNumber, eventParm->eventData.nodeFailed.deviceId);
+ break;
+ case XmPciLpEvent_NodeRecovered:
+ printk(KERN_INFO "XmPciLpEvent.c: multi-adapter bridge %d/%d/%d recovered\n", eventParm->eventData.nodeRecovered.busNumber, eventParm->eventData.nodeRecovered.subBusNumber, eventParm->eventData.nodeRecovered.deviceId);
+ break;
+ default:
+ printk(KERN_ERR "XmPciLpEvent.c: unrecognized event subtype 0x%x\n",
+ eventParm->hvLpEvent.xSubtype);
+ break;
+ };
+}
+
+
+/* This should be called sometime prior to buswalk (init_IRQ would be good) */
+/* Register the PCI Lp event handler with the hypervisor event layer
+ * and open the PciIo event path to LP 0.  Must run before the PCI
+ * bus walk (init_IRQ is a good spot).  Returns 0 on success or the
+ * non-zero rc from the failing HvLpEvent call.
+ * (Changed `()' to `(void)': old-style unprototyped declarator.)
+ */
+int XmPciLpEvent_init(void)
+{
+ int xRc;
+ PPCDBG(PPCDBG_BUSWALK,"XmPciLpEvent_init, Register Event type 0x%04X\n",HvLpEvent_Type_PciIo);
+
+ xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, &XmPciLpEvent_handler);
+ if (xRc == 0) {
+ xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
+ if (xRc != 0) {
+ printk(KERN_ERR "XmPciLpEvent.c: open event path failed with rc 0x%x\n", xRc);
+ }
+ }
+ else {
+ printk(KERN_ERR "XmPciLpEvent.c: register handler failed with rc 0x%x\n", xRc);
+ }
+ return xRc;
+}
+
--- /dev/null
+/*
+ * align.c - handle alignment exceptions for the Power PC.
+ *
+ * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ * Copyright (c) 1998-1999 TiVo, Inc.
+ * PowerPC 403GCX modifications.
+ * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * PowerPC 403GCX/405GP modifications.
+ * Copyright (c) 2001 PPC64 team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/cache.h>
+
+/* Per-instruction descriptor for the alignment fixup table:
+ * operand length in bytes plus the LD/ST/... flag bits below. */
+struct aligninfo {
+ unsigned char len;
+ unsigned char flags;
+};
+
+/* Field extractors for a raw PowerPC instruction word. */
+#define OPCD(inst) (((inst) & 0xFC000000) >> 26)
+#define RS(inst) (((inst) & 0x03E00000) >> 21)
+#define RA(inst) (((inst) & 0x001F0000) >> 16)
+/* D-form loads/stores occupy primary opcodes 32..47 */
+#define IS_DFORM(code) ((code) >= 32 && (code) <= 47)
+
+#define INVALID { 0, 0 }
+
+#define LD 1 /* load */
+#define ST 2 /* store */
+#define SE 4 /* sign-extend value */
+#define F 8 /* to/from fp regs */
+#define U 0x10 /* update index register */
+#define M 0x20 /* multiple load/store */
+#define S 0x40 /* single-precision fp, or byte-swap value */
+#define HARD 0x80 /* string, stwcx. */
+#define D 0x100 /* double-word load/store */
+
+#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
+
+/*
+ * The PowerPC stores certain bits of the instruction that caused the
+ * alignment exception in the DSISR register. This array maps those
+ * bits to information about the operand length and what the
+ * instruction would do.
+ */
+/* Indexed by a 7-bit value built from DSISR (or synthesized from the
+ * instruction on POWER4 -- see fix_alignment).  The binary groupings
+ * in the comments show how the index decomposes. */
+static struct aligninfo aligninfo[128] = {
+ { 4, LD }, /* 00 0 0000: lwz / lwarx */
+ INVALID, /* 00 0 0001 */
+ { 4, ST }, /* 00 0 0010: stw */
+ INVALID, /* 00 0 0011 */
+ { 2, LD }, /* 00 0 0100: lhz */
+ { 2, LD+SE }, /* 00 0 0101: lha */
+ { 2, ST }, /* 00 0 0110: sth */
+ { 4, LD+M }, /* 00 0 0111: lmw */
+ { 4, LD+F+S }, /* 00 0 1000: lfs */
+ { 8, LD+F }, /* 00 0 1001: lfd */
+ { 4, ST+F+S }, /* 00 0 1010: stfs */
+ { 8, ST+F }, /* 00 0 1011: stfd */
+ INVALID, /* 00 0 1100 */
+ { 8, LD }, /* 00 0 1101: ld */
+ INVALID, /* 00 0 1110 */
+ { 8, ST }, /* 00 0 1111: std */
+ { 4, LD+U }, /* 00 1 0000: lwzu */
+ INVALID, /* 00 1 0001 */
+ { 4, ST+U }, /* 00 1 0010: stwu */
+ INVALID, /* 00 1 0011 */
+ { 2, LD+U }, /* 00 1 0100: lhzu */
+ { 2, LD+SE+U }, /* 00 1 0101: lhau */
+ { 2, ST+U }, /* 00 1 0110: sthu */
+ { 4, ST+M }, /* 00 1 0111: stmw */
+ { 4, LD+F+S+U }, /* 00 1 1000: lfsu */
+ { 8, LD+F+U }, /* 00 1 1001: lfdu */
+ { 4, ST+F+S+U }, /* 00 1 1010: stfsu */
+ { 8, ST+F+U }, /* 00 1 1011: stfdu */
+ INVALID, /* 00 1 1100 */
+ { 8, ST }, /* 00 1 1101: std */
+ INVALID, /* 00 1 1110 */
+ INVALID, /* 00 1 1111 */
+ { 8, LD }, /* 01 0 0000: ldx */
+ INVALID, /* 01 0 0001 */
+ { 8, ST }, /* 01 0 0010: stdx */
+ INVALID, /* 01 0 0011 */
+ INVALID, /* 01 0 0100 */
+ INVALID, /* 01 0 0101: lwax?? */
+ INVALID, /* 01 0 0110 */
+ INVALID, /* 01 0 0111 */
+ { 0, LD+HARD }, /* 01 0 1000: lswx */
+ { 0, LD+HARD }, /* 01 0 1001: lswi */
+ { 0, ST+HARD }, /* 01 0 1010: stswx */
+ { 0, ST+HARD }, /* 01 0 1011: stswi */
+ INVALID, /* 01 0 1100 */
+ { 8, LD+U }, /* 01 0 1101: ldu */
+ INVALID, /* 01 0 1110 */
+ { 8, ST+U }, /* 01 0 1111: stdu */
+ { 8, LD+U }, /* 01 1 0000: ldux */
+ INVALID, /* 01 1 0001 */
+ { 8, ST+U }, /* 01 1 0010: stdux */
+ INVALID, /* 01 1 0011 */
+ INVALID, /* 01 1 0100 */
+ INVALID, /* 01 1 0101: lwaux?? */
+ INVALID, /* 01 1 0110 */
+ INVALID, /* 01 1 0111 */
+ INVALID, /* 01 1 1000 */
+ INVALID, /* 01 1 1001 */
+ INVALID, /* 01 1 1010 */
+ INVALID, /* 01 1 1011 */
+ INVALID, /* 01 1 1100 */
+ INVALID, /* 01 1 1101 */
+ INVALID, /* 01 1 1110 */
+ INVALID, /* 01 1 1111 */
+ INVALID, /* 10 0 0000 */
+ INVALID, /* 10 0 0001 */
+ { 0, ST+HARD }, /* 10 0 0010: stwcx. */
+ INVALID, /* 10 0 0011 */
+ INVALID, /* 10 0 0100 */
+ INVALID, /* 10 0 0101 */
+ INVALID, /* 10 0 0110 */
+ INVALID, /* 10 0 0111 */
+ { 4, LD+S }, /* 10 0 1000: lwbrx */
+ INVALID, /* 10 0 1001 */
+ { 4, ST+S }, /* 10 0 1010: stwbrx */
+ INVALID, /* 10 0 1011 */
+ { 2, LD+S }, /* 10 0 1100: lhbrx */
+ INVALID, /* 10 0 1101 */
+ { 2, ST+S }, /* 10 0 1110: sthbrx */
+ INVALID, /* 10 0 1111 */
+ INVALID, /* 10 1 0000 */
+ INVALID, /* 10 1 0001 */
+ INVALID, /* 10 1 0010 */
+ INVALID, /* 10 1 0011 */
+ INVALID, /* 10 1 0100 */
+ INVALID, /* 10 1 0101 */
+ INVALID, /* 10 1 0110 */
+ INVALID, /* 10 1 0111 */
+ INVALID, /* 10 1 1000 */
+ INVALID, /* 10 1 1001 */
+ INVALID, /* 10 1 1010 */
+ INVALID, /* 10 1 1011 */
+ INVALID, /* 10 1 1100 */
+ INVALID, /* 10 1 1101 */
+ INVALID, /* 10 1 1110 */
+ { 0, ST+HARD }, /* 10 1 1111: dcbz */
+ { 4, LD }, /* 11 0 0000: lwzx */
+ INVALID, /* 11 0 0001 */
+ { 4, ST }, /* 11 0 0010: stwx */
+ INVALID, /* 11 0 0011 */
+ { 2, LD }, /* 11 0 0100: lhzx */
+ { 2, LD+SE }, /* 11 0 0101: lhax */
+ { 2, ST }, /* 11 0 0110: sthx */
+ INVALID, /* 11 0 0111 */
+ { 4, LD+F+S }, /* 11 0 1000: lfsx */
+ { 8, LD+F }, /* 11 0 1001: lfdx */
+ { 4, ST+F+S }, /* 11 0 1010: stfsx */
+ { 8, ST+F }, /* 11 0 1011: stfdx */
+ INVALID, /* 11 0 1100 */
+ INVALID, /* 11 0 1101 */
+ INVALID, /* 11 0 1110 */
+ INVALID, /* 11 0 1111 */
+ { 4, LD+U }, /* 11 1 0000: lwzux */
+ INVALID, /* 11 1 0001 */
+ { 4, ST+U }, /* 11 1 0010: stwux */
+ INVALID, /* 11 1 0011 */
+ { 2, LD+U }, /* 11 1 0100: lhzux */
+ { 2, LD+SE+U }, /* 11 1 0101: lhaux */
+ { 2, ST+U }, /* 11 1 0110: sthux */
+ INVALID, /* 11 1 0111 */
+ { 4, LD+F+S+U }, /* 11 1 1000: lfsux */
+ { 8, LD+F+U }, /* 11 1 1001: lfdux */
+ { 4, ST+F+S+U }, /* 11 1 1010: stfsux */
+ { 8, ST+F+U }, /* 11 1 1011: stfdux */
+ INVALID, /* 11 1 1100 */
+ INVALID, /* 11 1 1101 */
+ INVALID, /* 11 1 1110 */
+ INVALID, /* 11 1 1111 */
+};
+
+/* uses the local `t' declared in fix_alignment */
+#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
+
+/*
+ * Emulate the load/store that caused an alignment exception.
+ * Returns 1 on success, 0 if the instruction is too hard/invalid
+ * (caller should signal the task), -EFAULT on a bad user address.
+ * Fix: two lines below had `&current' mangled into the HTML entity
+ * sequence `¤t' (would not compile); restored.
+ */
+int
+fix_alignment(struct pt_regs *regs)
+{
+ int instr, nb, flags;
+ int opcode, f1, f2, f3;
+ int i, t;
+ int reg, areg;
+ unsigned char *addr;
+ union {
+ int l;
+ long ll;
+ float f;
+ double d;
+ unsigned char v[8];
+ } data;
+
+ if (__is_processor(PV_POWER4)) {
+ /*
+ * The POWER4 has a DSISR register but doesn't set it on
+ * an alignment fault. -- paulus
+ */
+
+ /* Synthesize the table index from the instruction itself */
+ instr = *((unsigned int *)regs->nip);
+ opcode = OPCD(instr);
+ reg = RS(instr);
+ areg = RA(instr);
+
+ if (IS_DFORM(opcode)) {
+ f1 = 0;
+ f2 = (instr & 0x04000000) >> 26;
+ f3 = (instr & 0x78000000) >> 27;
+ } else {
+ f1 = (instr & 0x00000006) >> 1;
+ f2 = (instr & 0x00000040) >> 6;
+ f3 = (instr & 0x00000780) >> 7;
+ }
+
+ instr = ((f1 << 5) | (f2 << 4) | f3);
+ } else {
+ reg = (regs->dsisr >> 5) & 0x1f; /* source/dest register */
+ areg = regs->dsisr & 0x1f; /* register to update */
+ instr = (regs->dsisr >> 10) & 0x7f;
+ instr |= (regs->dsisr >> 13) & 0x60;
+ }
+
+ nb = aligninfo[instr].len;
+ if (nb == 0) {
+ long *p;
+ int i;
+
+ if (instr != DCBZ)
+ return 0; /* too hard or invalid instruction */
+ /*
+ * The dcbz (data cache block zero) instruction
+ * gives an alignment fault if used on non-cacheable
+ * memory. We handle the fault mainly for the
+ * case when we are running with the cache disabled
+ * for debugging.
+ */
+ p = (long *) (regs->dar & -L1_CACHE_BYTES);
+ for (i = 0; i < L1_CACHE_BYTES / sizeof(long); ++i)
+ p[i] = 0;
+ return 1;
+ }
+
+ flags = aligninfo[instr].flags;
+ addr = (unsigned char *)regs->dar;
+
+ /* Verify the address of the operand */
+ if (user_mode(regs)) {
+ if (verify_area((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb))
+ return -EFAULT; /* bad address */
+ }
+
+ /* Flush live FP state to the thread struct before touching fpr[] */
+ if ((flags & F) && (regs->msr & MSR_FP))
+ giveup_fpu(current);
+ if (flags & M)
+ return 0; /* too hard for now */
+
+ /* If we read the operand, copy it in.  Halfwords land in v[2..3]
+ * so data.l holds the value in the low 16 bits. */
+ if (flags & LD) {
+ if (nb == 2) {
+ data.v[0] = data.v[1] = 0;
+ if (__get_user(data.v[2], addr)
+ || __get_user(data.v[3], addr+1))
+ return -EFAULT;
+ } else {
+ for (i = 0; i < nb; ++i)
+ if (__get_user(data.v[i], addr+i))
+ return -EFAULT;
+ }
+ }
+ /* Unfortunately D (== 0x100) doesn't fit in the aligninfo[n].flags
+ field. So synthesize it here. */
+ if ((flags & F) == 0 && nb == 8)
+ flags |= D;
+
+ switch (flags & ~U) {
+ case LD+SE:
+ if (data.v[2] >= 0x80)
+ data.v[0] = data.v[1] = -1;
+ /* fall through */
+ case LD:
+ regs->gpr[reg] = data.l;
+ break;
+ case LD+D:
+ regs->gpr[reg] = data.ll;
+ break;
+ case LD+S: /* byte-reversed load */
+ if (nb == 2) {
+ SWAP(data.v[2], data.v[3]);
+ } else {
+ SWAP(data.v[0], data.v[3]);
+ SWAP(data.v[1], data.v[2]);
+ }
+ regs->gpr[reg] = data.l;
+ break;
+ case ST:
+ data.l = regs->gpr[reg];
+ break;
+ case ST+D:
+ data.ll = regs->gpr[reg];
+ break;
+ case ST+S: /* byte-reversed store */
+ data.l = regs->gpr[reg];
+ if (nb == 2) {
+ SWAP(data.v[2], data.v[3]);
+ } else {
+ SWAP(data.v[0], data.v[3]);
+ SWAP(data.v[1], data.v[2]);
+ }
+ break;
+ case LD+F:
+ current->thread.fpr[reg] = data.d;
+ break;
+ case ST+F:
+ data.d = current->thread.fpr[reg];
+ break;
+ /* these require some floating point conversions... */
+ /* we'd like to use the assignment, but we have to compile
+ * the kernel with -msoft-float so it doesn't use the
+ * fp regs for copying 8-byte objects. */
+ case LD+F+S:
+ enable_kernel_fp();
+ cvt_fd(&data.f, &current->thread.fpr[reg], &current->thread.fpscr);
+ /* current->thread.fpr[reg] = data.f; */
+ break;
+ case ST+F+S:
+ enable_kernel_fp();
+ cvt_df(&current->thread.fpr[reg], &data.f, &current->thread.fpscr);
+ /* data.f = current->thread.fpr[reg]; */
+ break;
+ default:
+ printk("align: can't handle flags=%x\n", flags);
+ return 0;
+ }
+
+ if (flags & ST) {
+ if (nb == 2) {
+ if (__put_user(data.v[2], addr)
+ || __put_user(data.v[3], addr+1))
+ return -EFAULT;
+ } else {
+ for (i = 0; i < nb; ++i)
+ if (__put_user(data.v[i], addr+i))
+ return -EFAULT;
+ }
+ }
+
+ /* update-form instructions leave the EA in the update register */
+ if (flags & U) {
+ regs->gpr[areg] = regs->dar;
+ }
+
+ return 1;
+}
--- /dev/null
+/*
+ * binfmt_elf32.c: Support 32-bit PPC ELF binaries on Power3 and followons.
+ * based on the SPARC64 version.
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ *
+ * Copyright (C) 2000,2001 Ken Aaker (kdaaker@rchland.vnet.ibm.com), IBM Corp
+ * Copyright (C) 2001 Anton Blanchard (anton@au.ibm.com), IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* 32-bit PPC ELF identification consumed by fs/binfmt_elf.c. */
+#define ELF_ARCH EM_PPC
+#define ELF_CLASS ELFCLASS32
+/* Fix: dropped a stray trailing ';' -- ELF_DATA is used in expression
+ * context (e.g. e_ident checks), where the semicolon breaks parsing. */
+#define ELF_DATA ELFDATA2MSB
+
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/elfcore.h>
+
+/* 32-bit layout of struct timeval for core files read by 32-bit tools. */
+struct timeval32
+{
+ int tv_sec, tv_usec;
+};
+
+/* 32-bit core-dump process status; substituted for the native struct
+ * via the #define so binfmt_elf.c emits 32-bit note sections. */
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned int pr_sigpend; /* Set of pending signals */
+ unsigned int pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct timeval32 pr_utime; /* User time */
+ struct timeval32 pr_stime; /* System time */
+ struct timeval32 pr_cutime; /* Cumulative user time */
+ struct timeval32 pr_cstime; /* Cumulative system time */
+ elf_gregset_t pr_reg; /* General purpose registers. */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+/* 32-bit core-dump process info, same substitution trick as above. */
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned int pr_flag; /* flags */
+ u32 pr_uid;
+ u32 pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
+extern void start_thread32(struct pt_regs *, unsigned long, unsigned long);
+/* Redirect the generic loader's entry points to 32-bit variants
+ * before textually including binfmt_elf.c below. */
+#undef start_thread
+#define start_thread start_thread32
+#define init_elf_binfmt init_elf32_binfmt
+
+/* Map the ELF32 config options onto the names binfmt_elf.c tests. */
+#undef CONFIG_BINFMT_ELF
+#ifdef CONFIG_BINFMT_ELF32
+#define CONFIG_BINFMT_ELF CONFIG_BINFMT_ELF32
+#endif
+#undef CONFIG_BINFMT_ELF_MODULE
+#ifdef CONFIG_BINFMT_ELF32_MODULE
+#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
+#endif
+
+/* Compile the whole generic ELF loader with the overrides above. */
+#include "../../../fs/binfmt_elf.c"
--- /dev/null
+/*
+ * These are too big to be inlined.
+ */
+
+#include <linux/kernel.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+
+/* Return the index of the first zero bit at or after `offset' in the
+ * bitmap at `addr' (treated as 64-bit words), or a value >= `size'
+ * if there is none.  `size' is the total bitmap length in bits.
+ */
+unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+ unsigned long result = offset & ~63UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 63UL;
+ if (offset) {
+ /* mask off bits below `offset' by pretending they are set */
+ tmp = *(p++);
+ tmp |= ~0UL >> (64-offset);
+ if (size < 64)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= 64;
+ result += 64;
+ }
+ while (size & ~63UL) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += 64;
+ size -= 64;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ /* treat bits past the end of the bitmap as set */
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffz(tmp);
+}
+
+/* 0-based index of the least-significant set bit of `word'.
+ * Caller must pass a non-zero word (otherwise this never terminates,
+ * matching the original behaviour). */
+static __inline__ unsigned long ___ffs(unsigned long word)
+{
+ unsigned long bit;
+
+ for (bit = 0; (word & 1UL) == 0; bit++)
+ word >>= 1;
+ return bit;
+}
+
+/* Return the index of the first set bit at or after `offset' in the
+ * bitmap at `addr' (64-bit words), or a value >= `size' if none.
+ * Mirror image of find_next_zero_bit above.
+ */
+unsigned long find_next_bit(void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+ unsigned long result = offset & ~63UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 63UL;
+ if (offset) {
+ /* clear bits below `offset' so they cannot match */
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < 64)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= 64;
+ result += 64;
+ }
+ while (size & ~63UL) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += 64;
+ size -= 64;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ /* ignore bits past the end of the bitmap */
+ tmp &= (~0UL >> (64 - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ___ffs(tmp);
+}
+
+/*
+ * Integer base-2 logarithm of a 32-bit value, i.e. the index of the
+ * highest set bit, computed with the PowerPC count-leading-zeros-word
+ * instruction.  Result is undefined for x == 0 (cntlzw returns 32,
+ * giving -1 here).
+ */
+static __inline__ unsigned int ext2_ilog2(unsigned int x)
+{
+ int lz;
+
+ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 31 - lz;
+}
+
+/*
+ * Find-first-zero for a 32-bit value: returns the index of the lowest
+ * clear bit, or 32 if all bits are set.  Works by complementing x and
+ * isolating the lowest set bit with (x & -x).
+ */
+static __inline__ unsigned int ext2_ffz(unsigned int x)
+{
+ u32 tempRC;
+ if ((x = ~x) == 0)
+ return 32;
+ tempRC = ext2_ilog2(x & -x);
+ return tempRC;
+}
+
+/*
+ * Like find_next_zero_bit, but over a little-endian bitmap of 32-bit
+ * words (used for ext2 on-disk bitmaps).  Each word is byte-swapped on
+ * load via cpu_to_le32p before being tested.
+ */
+unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+ unsigned int result = offset & ~31;
+ unsigned int tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31;
+ if (offset) {
+ tmp = cpu_to_le32p(p++);
+ tmp |= ~0U >> (32-offset); /* bug or feature ? */
+ if (size < 32)
+ goto found_first;
+ if (tmp != ~0)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ /* Whole 32-bit words. */
+ while (size >= 32) {
+ if ((tmp = cpu_to_le32p(p++)) != ~0)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = cpu_to_le32p(p); /* partial trailing word */
+found_first:
+ tmp |= ~0 << size; /* force bits beyond 'size' to 1 */
+ if (tmp == ~0) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ext2_ffz(tmp);
+}
--- /dev/null
+/*
+ * linux/arch/ppc/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Adapted from 'alpha' version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * Modified by PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * bootup setup stuff..
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/major.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/blk.h>
+#include <linux/ioport.h>
+#include <linux/console.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/adb.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+
+#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/pci-bridge.h>
+#include <asm/pci_dma.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/keyboard.h>
+#include <asm/init.h>
+#include <asm/Naca.h>
+#include <asm/time.h>
+
+#include "local_irq.h"
+#include "i8259.h"
+#include "open_pic.h"
+#include "xics.h"
+#include <asm/ppcdebug.h>
+
+extern volatile unsigned char *chrp_int_ack_special;
+extern struct Naca *naca;
+
+void chrp_setup_pci_ptrs(void);
+void chrp_progress(char *, unsigned short);
+void chrp_request_regions(void);
+
+extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int pckbd_getkeycode(unsigned int scancode);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+ char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern void pckbd_leds(unsigned char leds);
+extern void pckbd_init_hw(void);
+extern unsigned char pckbd_sysrq_xlate[128];
+extern void openpic_init_IRQ(void);
+extern void init_ras_IRQ(void);
+
+extern void find_and_init_phbs(void);
+extern void pSeries_pcibios_fixup(void);
+extern void iSeries_pcibios_fixup(void);
+
+extern void pSeries_get_rtc_time(struct rtc_time *rtc_time);
+extern int pSeries_set_rtc_time(struct rtc_time *rtc_time);
+void pSeries_calibrate_decr(void);
+
+kdev_t boot_dev;
+unsigned long virtPython0Facilities = 0; // python0 facility area (memory mapped io) (64-bit format) VIRTUAL address.
+
+extern HPTE *Hash, *Hash_end;
+extern unsigned long Hash_size, Hash_mask;
+extern int probingmem;
+extern unsigned long loops_per_jiffy;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+#endif
+
+/*
+ * /proc/cpuinfo "machine" line for CHRP: report the Open Firmware
+ * "model" property of the device tree root, or an empty string if the
+ * root node or property is missing.  (Previously a missing property
+ * overwrote the "" default with NULL and handed NULL to seq_printf.)
+ */
+void
+chrp_get_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *model = "";
+ const char *prop;
+
+ root = find_path_device("/");
+ if (root) {
+ prop = get_property(root, "model", NULL);
+ if (prop)
+ model = prop;
+ }
+ seq_printf(m, "machine\t\t: CHRP %s\n", model);
+}
+
+/* Reserve the legacy ISA I/O port ranges (PICs, DMA controllers,
+ * timer) so other drivers cannot claim them. */
+void __init chrp_request_regions(void) {
+ request_region(0x20,0x20,"pic1");
+ request_region(0xa0,0x20,"pic2");
+ request_region(0x00,0x20,"dma1");
+ request_region(0x40,0x20,"timer");
+ request_region(0x80,0x10,"dma page reg");
+ request_region(0xc0,0x20,"dma2");
+}
+
+/*
+ * CHRP/pSeries architecture setup: pick a root device, initialize PCI
+ * host bridges (except on iSeries, where that happens later), and map
+ * the OpenPIC interrupt controller if the device tree advertises one.
+ */
+void __init
+chrp_setup_arch(void)
+{
+ extern char cmd_line[];
+ struct device_node *root;
+ unsigned int *opprop;
+
+ /* openpic global configuration register (64-bit format). */
+ /* openpic Interrupt Source Unit pointer (64-bit format). */
+ /* python0 facility area (mmio) (64-bit format) REAL address. */
+
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* this is fine for chrp */
+ initrd_below_start_ok = 1;
+
+ if (initrd_start)
+ ROOT_DEV = mk_kdev(RAMDISK_MAJOR, 0);
+ else
+#endif
+ ROOT_DEV = to_kdev_t(0x0802); /* sda2 (sda1 is for the kernel) */
+
+ printk("Boot arguments: %s\n", cmd_line);
+
+ /* Find and initialize PCI host bridges */
+ /* iSeries needs to be done much later. */
+ #ifndef CONFIG_PPC_ISERIES
+ find_and_init_phbs();
+ #endif
+
+ /* Find the Open PIC if present */
+ root = find_path_device("/");
+ opprop = (unsigned int *) get_property(root,
+ "platform-open-pic", NULL);
+ if (opprop != 0) {
+ int n = prom_n_addr_cells(root);
+ unsigned long openpic;
+
+ /* Assemble the (possibly multi-cell) OpenPIC physical
+ * address, most-significant cell first. */
+ for (openpic = 0; n > 0; --n)
+ openpic = (openpic << 32) + *opprop++;
+ printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic);
+ udbg_printf("OpenPIC addr: %lx\n", openpic);
+ OpenPIC_Addr = __ioremap(openpic, 0x40000, _PAGE_NO_CACHE);
+ }
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+}
+
+/* Second-stage CHRP init (ppc_md.init): claim legacy I/O regions and
+ * show the kernel release on the progress display. */
+void __init
+chrp_init2(void)
+{
+ /*
+ * It is sensitive when this is called (not too early).
+ * -- tibit
+ */
+ chrp_request_regions();
+ ppc_md.progress(UTS_RELEASE, 0x7777);
+}
+
+
+/* Early initialization. Relocation is on but do not reference unbolted pages */
+/*
+ * Early pSeries initialization: hash-table and TCE setup, early PCI,
+ * optional SMP bring-up, and mapping of the serial port so the udbg
+ * low-level debug console works before the real console exists.
+ * Relocation is on, but only bolted pages may be touched.
+ */
+void __init pSeries_init_early(void)
+{
+#ifdef CONFIG_PPC_PSERIES /* This ifdef should go away */
+ void *comport;
+
+ hpte_init_pSeries();
+ tce_init_pSeries();
+ pSeries_pcibios_init_early();
+
+#ifdef CONFIG_SMP
+ smp_init_pSeries();
+#endif
+
+ /* Map the uart for udbg. */
+ comport = (void *)__ioremap(naca->serialPortAddr, 16, _PAGE_NO_CACHE);
+ udbg_init_uart(comport);
+
+ ppc_md.udbg_putc = udbg_putc;
+ ppc_md.udbg_getc = udbg_getc;
+ ppc_md.udbg_getc_poll = udbg_getc_poll;
+#endif
+}
+
+/*
+ * Fill in the ppc_md machine-dependent function table for CHRP/pSeries.
+ * The interrupt controller entries are chosen from the NACA (OpenPIC
+ * vs. XICS); RTAS supplies restart/power-off/halt and the RTC.
+ * r3-r7 are the boot-time register arguments (currently unused here).
+ */
+void __init
+chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+#if 0 /* PPPBBB remove this later... -Peter */
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* take care of initrd if we have one */
+ if ( r6 )
+ {
+ initrd_start = __va(r6);
+ initrd_end = __va(r6 + r7);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+#endif
+
+ ppc_md.ppc_machine = _machine;
+
+ ppc_md.setup_arch = chrp_setup_arch;
+ ppc_md.setup_residual = NULL;
+ ppc_md.get_cpuinfo = chrp_get_cpuinfo;
+ /* Select the interrupt controller reported by firmware. */
+ if(naca->interrupt_controller == IC_OPEN_PIC) {
+ ppc_md.init_IRQ = openpic_init_IRQ;
+ ppc_md.get_irq = openpic_get_irq;
+ ppc_md.post_irq = NULL;
+ } else {
+ ppc_md.init_IRQ = xics_init_IRQ;
+ ppc_md.get_irq = xics_get_irq;
+ ppc_md.post_irq = NULL;
+ }
+ ppc_md.init_ras_IRQ = init_ras_IRQ;
+
+ #ifndef CONFIG_PPC_ISERIES
+ ppc_md.pcibios_fixup = pSeries_pcibios_fixup;
+ #else
+ ppc_md.pcibios_fixup = NULL;
+ // ppc_md.pcibios_fixup = iSeries_pcibios_fixup;
+ #endif
+
+
+ ppc_md.init = chrp_init2;
+
+ ppc_md.restart = rtas_restart;
+ ppc_md.power_off = rtas_power_off;
+ ppc_md.halt = rtas_halt;
+
+ ppc_md.time_init = NULL;
+ ppc_md.get_boot_time = pSeries_get_rtc_time;
+ ppc_md.get_rtc_time = pSeries_get_rtc_time;
+ ppc_md.set_rtc_time = pSeries_set_rtc_time;
+ ppc_md.calibrate_decr = pSeries_calibrate_decr;
+
+ ppc_md.progress = chrp_progress;
+
+#ifdef CONFIG_VT
+ ppc_md.kbd_setkeycode = pckbd_setkeycode;
+ ppc_md.kbd_getkeycode = pckbd_getkeycode;
+ ppc_md.kbd_translate = pckbd_translate;
+ ppc_md.kbd_unexpected_up = pckbd_unexpected_up;
+ ppc_md.kbd_leds = pckbd_leds;
+ ppc_md.kbd_init_hw = pckbd_init_hw;
+#ifdef CONFIG_MAGIC_SYSRQ
+ ppc_md.ppc_kbd_sysrq_xlate = pckbd_sysrq_xlate;
+ SYSRQ_KEY = 0x63; /* Print Screen */
+#endif
+#endif
+
+ ppc_md.progress("Linux ppc64\n", 0x0);
+}
+
+/*
+ * Write a progress string to the RTAS operator panel (or, if only the
+ * hex display exists, show 'hex' on it).  The display width is read
+ * once from the "ibm,display-line-length" property, defaulting to 16.
+ * Text beyond the line width is skipped up to the next newline, and
+ * the remainder of the line is blanked.
+ */
+void __chrp
+chrp_progress(char *s, unsigned short hex)
+{
+ struct device_node *root;
+ int width, *p;
+ char *os;
+ static int display_character, set_indicator;
+ static int max_width;
+
+ if (hex)
+ udbg_printf("<chrp_progress> %s\n", s);
+
+ if (!rtas.base || (_machine != _MACH_pSeries))
+ return;
+
+ /* One-time lookup of panel width and RTAS tokens. */
+ if (max_width == 0) {
+ if ( (root = find_path_device("/rtas")) &&
+ (p = (unsigned int *)get_property(root,
+ "ibm,display-line-length",
+ NULL)) )
+ max_width = *p;
+ else
+ max_width = 0x10;
+ display_character = rtas_token("display-character");
+ set_indicator = rtas_token("set-indicator");
+ }
+ if (display_character == RTAS_UNKNOWN_SERVICE) {
+ /* use hex display */
+ if (set_indicator == RTAS_UNKNOWN_SERVICE)
+ return;
+ rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
+ return;
+ }
+
+ /* Carriage return: start writing at column 0. */
+ rtas_call(display_character, 1, 1, NULL, '\r');
+
+ width = max_width;
+ os = s;
+ while ( *os )
+ {
+ if ( (*os == '\n') || (*os == '\r') )
+ width = max_width;
+ else
+ width--;
+ rtas_call(display_character, 1, 1, NULL, *os++ );
+ /* if we overwrite the screen length */
+ if ( width == 0 )
+ while ( (*os != 0) && (*os != '\n') && (*os != '\r') )
+ os++;
+ }
+
+ /* Blank to end of line. */
+ while ( width-- > 0 )
+ rtas_call(display_character, 1, 1, NULL, ' ' );
+}
+
+extern void setup_default_decr(void);
+
+/*
+ * Determine the decrementer/timebase frequency from the device tree
+ * ("timebase-frequency" on a cpu node, hardcoded fallback otherwise)
+ * and derive the timebase conversion factors used by the time code.
+ */
+void __init pSeries_calibrate_decr(void)
+{
+ struct device_node *cpu;
+ struct div_result divres;
+ int *fp;
+ unsigned long freq;
+
+ /*
+ * The cpu node should have a timebase-frequency property
+ * to tell us the rate at which the decrementer counts.
+ */
+ freq = 16666000; /* hardcoded default */
+ cpu = find_type_devices("cpu");
+ if (cpu != 0) {
+ fp = (int *) get_property(cpu, "timebase-frequency", NULL);
+ if (fp != 0)
+ freq = *fp;
+ }
+ printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
+ freq/1000000, freq%1000000 );
+
+ tb_ticks_per_jiffy = freq / HZ;
+ tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+ tb_ticks_per_usec = freq / 1000000;
+ tb_to_us = mulhwu_scale_factor(freq, 1000000);
+ /* tb_to_xs: scaled 2^-44 fraction for timebase -> xsec. */
+ div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
+ tb_to_xs = divres.result_low;
+
+ setup_default_decr();
+}
+
--- /dev/null
+/*
+ * eeh.c
+ * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Change Activity:
+ * 2001/10/27 : engebret : Created.
+ * End Change Activity
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/bootmem.h>
+#include <asm/Paca.h>
+#include <asm/processor.h>
+#include <asm/Naca.h>
+#include <asm/io.h>
+#include "pci.h"
+
+#define BUID_HI(buid) ((buid) >> 32)
+#define BUID_LO(buid) ((buid) & 0xffffffff)
+#define CONFIG_ADDR(busno, devfn) (((((busno) & 0xff) << 8) | ((devfn) & 0xf8)) << 8)
+
+unsigned long eeh_total_mmio_reads;
+unsigned long eeh_total_mmio_ffs;
+unsigned long eeh_false_positives;
+/* RTAS tokens */
+static int ibm_set_eeh_option;
+static int ibm_set_slot_reset;
+static int ibm_read_slot_reset_state;
+
+static int eeh_implemented;
+#define EEH_MAX_OPTS 4096
+static char *eeh_opts;
+static int eeh_opts_last;
+static int eeh_check_opts_config(struct pci_dev *dev);
+
+
+/*
+ * Encode PHB number, bus, devfn and an MMIO offset into a single EEH
+ * token address in the unmapped-I/O region.  Panics if phb or offset
+ * exceed their fields.
+ * NOTE(review): the sub-field shifts (phb << 48 etc.) assume the
+ * callers' values fit the packed layout shown; verify against the
+ * decode in eeh_check_failure().
+ */
+unsigned long eeh_token(unsigned long phb, unsigned long bus, unsigned long devfn, unsigned long offset)
+{
+ if (phb > 0xff)
+ panic("eeh_token: phb 0x%lx is too large\n", phb);
+ if (offset & 0x0fffffff00000000)
+ panic("eeh_token: offset 0x%lx is out of range\n", offset);
+ return ((IO_UNMAPPED_REGION_ID << 60) | (phb << 48UL) | ((bus & 0xff) << 40UL) | (devfn << 32UL) | (offset & 0xffffffff));
+}
+
+
+
+/* Stub: EEH slot state query not implemented yet; always reports 0. */
+int eeh_get_state(unsigned long ea) {
+ return 0;
+}
+
+
+/* Check for an eeh failure at the given token address.
+ * The given value has been read and it should be 1's (0xff, 0xffff or 0xffffffff).
+ *
+ * Probe to determine if an error actually occurred. If not return val.
+ * Otherwise panic.
+ */
+/* Check for an eeh failure at the given token address.
+ * The given value has been read and it should be 1's (0xff, 0xffff or 0xffffffff).
+ *
+ * Probe to determine if an error actually occurred (via the RTAS
+ * ibm,read-slot-reset-state call).  If not, return val.
+ * Otherwise panic, identifying the failing device if it can be found.
+ */
+unsigned long eeh_check_failure(void *token, unsigned long val)
+{
+ unsigned long config_addr = (unsigned long)token >> 24; /* PPBBDDRR */
+ unsigned long phbidx = (config_addr >> 24) & 0xff;
+ struct pci_controller *phb;
+ /* ibm,read-slot-reset-state returns 3 words; rets must hold all
+ * of them ([2] was previously out of bounds). */
+ unsigned long ret, rets[3];
+
+ config_addr &= 0xffff00; /* 00BBDD00 */
+
+ if (phbidx >= global_phb_number) {
+ panic("EEH: checking token %p phb index of %ld is greater than max of %d\n", token, phbidx, global_phb_number-1);
+ }
+ phb = phbtab[phbidx];
+ eeh_false_positives++;
+
+ ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
+ config_addr, BUID_HI(phb->buid), BUID_LO(phb->buid));
+ /* rets[1] == 1 means EEH is supported here; rets[2] != 0 means
+ * the slot is actually frozen -> a real MMIO failure. */
+ if (ret == 0 && rets[1] == 1 && rets[2] != 0) {
+ struct pci_dev *dev;
+ int bus = ((unsigned long)token >> 40) & 0xffff; /* include PHB# in bus */
+ int devfn = (config_addr >> 8) & 0xff;
+
+ dev = pci_find_slot(bus, devfn);
+ if (dev)
+ panic("EEH: MMIO failure (%ld) on device:\n %s %s\n",
+ rets[2], dev->slot_name, dev->name);
+ else
+ panic("EEH: MMIO failure (%ld) on device buid %lx, config_addr %lx\n", rets[2], phb->buid, config_addr);
+ }
+ return val; /* good case */
+}
+
+/* Look up the RTAS tokens EEH needs; if the firmware implements
+ * ibm,set-eeh-option, mark EEH as available. */
+void eeh_init(void) {
+ ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
+ ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
+ ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
+ if (ibm_set_eeh_option != RTAS_UNKNOWN_SERVICE) {
+ printk("PCI Enhanced I/O Error Handling Enabled\n");
+ eeh_implemented = 1;
+ }
+}
+
+
+/* Given a PCI device check if eeh should be configured or not.
+ * This may look at firmware properties and/or kernel cmdline options.
+ */
+/* Given a PCI device check if eeh should be configured or not.
+ * This may look at firmware properties and/or kernel cmdline options.
+ * Returns 1 if the device is EEH-capable and enabled, 0 otherwise.
+ */
+int is_eeh_configured(struct pci_dev *dev)
+{
+ struct device_node *dn = pci_device_to_OF_node(dev);
+ struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
+ /* ibm,read-slot-reset-state returns 3 words; rets must hold all
+ * of them ([2] was previously written out of bounds by rtas_call). */
+ unsigned long ret, rets[3];
+
+ if (dn == NULL || phb == NULL || phb->buid == 0 || !eeh_implemented)
+ return 0;
+
+ /* Hack: turn off eeh for display class devices.
+ * This fixes matrox accel framebuffer.
+ */
+ if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+ return 0;
+
+ /* Honour eeh-on/eeh-off command line options. */
+ if (!eeh_check_opts_config(dev))
+ return 0;
+
+ ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
+ CONFIG_ADDR(dn->busno, dn->devfn),
+ BUID_HI(phb->buid), BUID_LO(phb->buid));
+ if (ret == 0 && rets[1] == 1) {
+ printk("EEH: %s %s is EEH capable.\n", dev->slot_name, dev->name);
+ return 1;
+ }
+ return 0;
+}
+
+/* Enable or disable EEH for 'dev' via the RTAS ibm,set-eeh-option
+ * call.  Returns the RTAS status, or -2 if the device cannot take
+ * EEH (no OF node / PHB / buid, or EEH unimplemented). */
+int eeh_set_option(struct pci_dev *dev, int option)
+{
+ struct device_node *dn = pci_device_to_OF_node(dev);
+ struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
+
+ if (dn == NULL || phb == NULL || phb->buid == 0 || !eeh_implemented)
+ return -2;
+
+ return rtas_call(ibm_set_eeh_option, 4, 1, NULL,
+ CONFIG_ADDR(dn->busno, dn->devfn),
+ BUID_HI(phb->buid), BUID_LO(phb->buid), option);
+}
+
+
+/* /proc read handler: dump the EEH statistics counters.  The whole
+ * report fits in one page, so mark EOF after the single write;
+ * without *eof set, the proc layer keeps calling the handler. */
+static int eeh_proc_falsepositive_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ len = sprintf(page, "eeh_false_positives=%ld\n"
+ "eeh_total_mmio_ffs=%ld\n"
+ "eeh_total_mmio_reads=%ld\n",
+ eeh_false_positives, eeh_total_mmio_ffs, eeh_total_mmio_reads);
+ *eof = 1;
+ return len;
+}
+
+/* Implementation of /proc/ppc64/eeh
+ * For now it is one file showing false positives.
+ */
+void eeh_init_proc(struct proc_dir_entry *top)
+{
+ struct proc_dir_entry *ent = create_proc_entry("eeh", S_IRUGO, top);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = NULL;
+ ent->read_proc = (void *)eeh_proc_falsepositive_read;
+ }
+}
+
+/*
+ * Test if "dev" should be configured on or off.
+ * This processes the options literally from right to left.
+ * This lets the user specify stupid combinations of options,
+ * but at least the result should be very predictable.
+ */
+/*
+ * Test if "dev" should be configured on or off.
+ * This processes the options literally from right to left.
+ * This lets the user specify stupid combinations of options,
+ * but at least the result should be very predictable.
+ *
+ * Returns 1 if the last matching option enables EEH for this device,
+ * 0 if it disables it (or the device can't take EEH at all).
+ */
+static int eeh_check_opts_config(struct pci_dev *dev)
+{
+ struct device_node *dn = pci_device_to_OF_node(dev);
+ struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
+ char devname[32], classname[32], phbname[32];
+ char *strs[8], *s;
+ int nstrs, i;
+ int ret = 0;
+
+ if (dn == NULL || phb == NULL || phb->buid == 0 || !eeh_implemented)
+ return 0;
+ /* Build list of strings to match */
+ nstrs = 0;
+ s = (char *)get_property(dn, "ibm,loc-code", 0);
+ if (s)
+ strs[nstrs++] = s;
+ sprintf(devname, "dev%04x:%04x", dev->vendor, dev->device);
+ strs[nstrs++] = devname;
+ sprintf(classname, "class%04x", dev->class);
+ strs[nstrs++] = classname;
+ sprintf(phbname, "pci@%lx", phb->buid);
+ strs[nstrs++] = phbname;
+ strs[nstrs++] = ""; /* yes, this matches the empty string */
+
+ /* Now see if any string matches the eeh_opts list.
+ * The eeh_opts list entries start with + or -.
+ */
+ for (s = eeh_opts; s && (s < (eeh_opts + eeh_opts_last)); s += strlen(s)+1) {
+ for (i = 0; i < nstrs; i++) {
+ if (strcasecmp(strs[i], s+1) == 0) {
+ /* The enable/disable sign is the first char
+ * of the option entry itself (*s), not the
+ * strs[] pointer that was compared here. */
+ ret = (*s == '+') ? 1 : 0;
+ }
+ }
+ }
+ return ret;
+}
+
+/* Handle kernel eeh-on & eeh-off cmd line options for eeh.
+ *
+ * We support:
+ * eeh-off=loc1,loc2,loc3...
+ *
+ * and this option can be repeated so
+ * eeh-off=loc1,loc2 eeh=loc3
+ * is the same as eeh-off=loc1,loc2,loc3
+ *
+ * loc is an IBM location code that can be found in a manual or
+ * via openfirmware (or the Hardware Management Console).
+ *
+ * We also support these additional "loc" values:
+ *
+ * dev#:# vendor:device id in hex (e.g. dev1022:2000)
+ * class# class id in hex (e.g. class0200)
+ * pci@buid all devices under phb (e.g. pci@fef00000)
+ *
+ * If no location code is specified all devices are assumed
+ * so eeh-off means eeh by default is off.
+ */
+
+/* This is implemented as a null separated list of strings.
+ * Each string looks like this: "+X" or "-X"
+ * where X is a loc code, dev, class or pci string (as shown above)
+ * or empty which is used to indicate all.
+ *
+ * We interpret this option string list during the buswalk
+ * so that it will literally behave left-to-right even if
+ * some combinations don't make sense. Give the user exactly
+ * what they want! :)
+ */
+
+/*
+ * Parse one eeh-on/eeh-off option string (comma-separated loc codes)
+ * and append each entry to the null-separated eeh_opts list as
+ * "+name" or "-name" according to 'state'.  An empty option means
+ * "all devices".  The buffer is bootmem-allocated on first use with
+ * an implicit "+" (default on) entry.
+ */
+static int __init eeh_parm(char *str, int state)
+{
+ char *s, *cur, *curend;
+ if (!eeh_opts) {
+ eeh_opts = alloc_bootmem(EEH_MAX_OPTS);
+ eeh_opts[eeh_opts_last++] = '+'; /* default */
+ eeh_opts[eeh_opts_last++] = '\0';
+ }
+ if (*str == '\0') {
+ eeh_opts[eeh_opts_last++] = state ? '+' : '-';
+ eeh_opts[eeh_opts_last++] = '\0';
+ return 1;
+ }
+ if (*str == '=')
+ str++;
+ for (s = str; s && *s != '\0'; s = curend) {
+ cur = s;
+ while (*cur == ',')
+ cur++; /* ignore empties. Don't treat as "all-on" or "all-off" */
+ curend = strchr(cur, ',');
+ if (!curend)
+ curend = cur + strlen(cur);
+ if (*cur) {
+ int curlen = curend-cur;
+ /* need room for sign byte + name + NUL */
+ if (eeh_opts_last + curlen > EEH_MAX_OPTS-2) {
+ printk("EEH: sorry...too many eeh cmd line options\n");
+ return 1;
+ }
+ eeh_opts[eeh_opts_last++] = state ? '+' : '-';
+ strncpy(eeh_opts+eeh_opts_last, cur, curlen);
+ eeh_opts_last += curlen;
+ eeh_opts[eeh_opts_last++] = '\0';
+ }
+ }
+ return 1;
+}
+
+/* "eeh-off=..." boot option: record the listed devices as disabled. */
+static int __init eehoff_parm(char *str)
+{
+ return eeh_parm(str, 0);
+}
+/* "eeh-on=..." boot option: record the listed devices as enabled. */
+static int __init eehon_parm(char *str)
+{
+ return eeh_parm(str, 1);
+}
+
+
+__setup("eeh-off", eehoff_parm);
+__setup("eeh-on", eehon_parm);
--- /dev/null
+/*
+ * arch/ppc/kernel/entry.S
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ * This file contains the system call entry code, context switch
+ * code, and exception/interrupt return code for PowerPC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ppc_asm.h"
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/thread_info.h>
+#include <linux/errno.h>
+#include <linux/sys.h>
+#include <linux/config.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#define DO_SOFT_DISABLE
+#endif
+
+#undef SHOW_SYSCALLS
+#undef SHOW_SYSCALLS_TASK
+
+#ifdef SHOW_SYSCALLS_TASK
+ .data
+show_syscalls_task:
+ .long -1
+#endif
+
+/*
+ * Handle a system call.
+ */
+ .text
+/*
+ * System call dispatcher.  Clears the CR SO bit (error flag), selects
+ * the 32-bit or 64-bit syscall table from the task's _TIF_32BIT flag,
+ * calls the handler, and encodes errors by negating the return value
+ * and setting SO.  Traced tasks (_TIF_SYSCALL_TRACE) take the path at
+ * label 50 which brackets the call with do_syscall_trace.
+ */
+_GLOBAL(DoSyscall)
+ std r0,THREAD+LAST_SYSCALL(r13)
+ ld r11,_CCR(r1) /* Clear SO bit in CR */
+ lis r10,0x1000
+ andc r11,r11,r10
+ std r11,_CCR(r1)
+#ifdef SHOW_SYSCALLS
+#ifdef SHOW_SYSCALLS_TASK
+ LOADBASE(r31,show_syscalls_task)
+ ld r31,show_syscalls_task@l(r31)
+ cmp 0,r13,r31
+ bne 1f
+#endif
+ LOADADDR(r3,7f)
+ ld r4,GPR0(r1)
+ ld r5,GPR3(r1)
+ ld r6,GPR4(r1)
+ ld r7,GPR5(r1)
+ ld r8,GPR6(r1)
+ ld r9,GPR7(r1)
+ bl .printk
+ LOADADDR(r3,77f)
+ ld r4,GPR8(r1)
+ ld r5,GPR9(r1)
+ mr r6,r13
+ bl .printk
+ ld r0,GPR0(r1)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+1:
+#endif /* SHOW_SYSCALLS */
+ /* thread_info lives at the base of the kernel stack */
+ clrrdi r10,r1,THREAD_SHIFT
+ ld r10,TI_FLAGS(r10)
+ andi. r11,r10,_TIF_SYSCALL_TRACE
+ bne- 50f
+ cmpli 0,r0,NR_syscalls
+ bge- 66f
+/*
+ * Need to vector to 32 Bit or default sys_call_table here,
+ * based on caller's run-mode / personality.
+ */
+#ifdef CONFIG_BINFMT_ELF32
+ andi. r11,r10,_TIF_32BIT
+ beq- 15f
+ LOADADDR(r10,.sys_call_table32)
+/*
+ * Now mung the first 4 parameters into shape, by making certain that
+ * the high bits (most significant 32 bits in 64 bit reg) are 0
+ * for the first 4 parameter regs(3-6).
+ */
+ clrldi r3,r3,32
+ clrldi r4,r4,32
+ clrldi r5,r5,32
+ clrldi r6,r6,32
+#if 0 /* XXX Why not ??? - Anton */
+ clrldi r7,r7,32
+ clrldi r8,r8,32
+#endif
+ b 17f
+15:
+#endif
+ LOADADDR(r10,.sys_call_table)
+17: slwi r0,r0,3
+ ldx r10,r10,r0 /* Fetch system call handler [ptr] */
+ mtlr r10
+ addi r9,r1,STACK_FRAME_OVERHEAD
+ blrl /* Call handler */
+_GLOBAL(ret_from_syscall_1)
+20: std r3,RESULT(r1) /* Save result */
+#ifdef SHOW_SYSCALLS
+#ifdef SHOW_SYSCALLS_TASK
+ cmp 0,r13,r31
+ bne 91f
+#endif
+ mr r4,r3
+ LOADADDR(r3,79f)
+ bl .printk
+ ld r3,RESULT(r1)
+91:
+#endif
+ /* Errors are -errno returns in [-_LAST_ERRNO, -1]. */
+ li r10,-_LAST_ERRNO
+ cmpl 0,r3,r10
+ blt 30f
+ neg r3,r3
+ cmpi 0,r3,ERESTARTNOHAND
+ bne 22f
+ li r3,EINTR
+22: ld r10,_CCR(r1) /* Set SO bit in CR */
+ oris r10,r10,0x1000
+ std r10,_CCR(r1)
+30: std r3,GPR3(r1) /* Update return value */
+ b .ret_from_except
+66: li r3,ENOSYS
+ b 22b
+
+/* Traced system call support */
+50: bl .do_syscall_trace
+ ld r0,GPR0(r1) /* Restore original registers */
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ /* XXX check this - Anton */
+ ld r9,GPR9(r1)
+ cmpli 0,r0,NR_syscalls
+ bge- 66f
+/*
+ * Need to vector to 32 Bit or default sys_call_table here,
+ * based on caller's run-mode / personality.
+ */
+#ifdef CONFIG_BINFMT_ELF32
+ clrrdi r10,r1,THREAD_SHIFT
+ ld r10,TI_FLAGS(r10)
+ andi. r11,r10,_TIF_32BIT
+ beq- 55f
+ LOADADDR(r10,.sys_call_table32)
+/*
+ * Now mung the first 4 parameters into shape, by making certain that
+ * the high bits (most significant 32 bits in 64 bit reg) are 0
+ * for the first 4 parameter regs(3-6).
+ */
+ clrldi r3,r3,32
+ clrldi r4,r4,32
+ clrldi r5,r5,32
+ clrldi r6,r6,32
+#if 0 /* XXX Why not ??? - Anton */
+ clrldi r7,r7,32
+ clrldi r8,r8,32
+#endif
+ b 57f
+55:
+#endif
+ LOADADDR(r10,.sys_call_table)
+57:
+ slwi r0,r0,3
+ ldx r10,r10,r0 /* Fetch system call handler [ptr] */
+ mtlr r10
+ addi r9,r1,STACK_FRAME_OVERHEAD
+ blrl /* Call handler */
+_GLOBAL(ret_from_syscall_2)
+58: std r3,RESULT(r1) /* Save result */
+ std r3,GPR0(r1) /* temporary gross hack to make strace work */
+ li r10,-_LAST_ERRNO
+ cmpl 0,r3,r10
+ blt 60f
+ neg r3,r3
+ cmpi 0,r3,ERESTARTNOHAND
+ bne 57f
+ li r3,EINTR
+57: ld r10,_CCR(r1) /* Set SO bit in CR */
+ oris r10,r10,0x1000
+ std r10,_CCR(r1)
+60: std r3,GPR3(r1) /* Update return value */
+ bl .do_syscall_trace
+ b .ret_from_except
+66: li r3,ENOSYS
+ b 57b
+#ifdef SHOW_SYSCALLS
+7: .string "syscall %d(%x, %x, %x, %x, %x, "
+77: .string "%x, %x), current=%p\n"
+79: .string " -> %x\n"
+ .align 2,0
+#endif
+
+/*
+ * sigreturn/rt_sigreturn entry stubs (32- and 64-bit flavours).
+ * All four fall through to label 80, which skips the normal error
+ * encoding for non-negative results and re-enters the syscall exit
+ * paths (20b / 58b) otherwise, honouring _TIF_SYSCALL_TRACE.
+ */
+_GLOBAL(ppc32_sigreturn)
+ bl .sys32_sigreturn
+ b 80f
+
+_GLOBAL(ppc32_rt_sigreturn)
+ bl .sys32_rt_sigreturn
+ b 80f
+
+_GLOBAL(ppc64_sigreturn)
+ bl .sys_sigreturn
+ b 80f
+
+_GLOBAL(ppc64_rt_sigreturn)
+ bl .sys_rt_sigreturn
+
+80: clrrdi r4,r1,THREAD_SHIFT
+ ld r4,TI_FLAGS(r4)
+ andi. r4,r4,_TIF_SYSCALL_TRACE
+ bne- 81f
+ cmpi 0,r3,0
+ bge .ret_from_except
+ b 20b
+81: cmpi 0,r3,0
+ blt 58b
+ bl .do_syscall_trace
+ b .ret_from_except
+
+/*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+ * of the other is restored from its kernel stack. The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via ret_from_except.
+ * On entry, r3 points to the THREAD for the current task, r4
+ * points to the THREAD for the new task.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path. If you change this (or in particular, the
+ * SAVE_REGS macro), you'll have to change the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/ppc/kernel/process.c
+ */
+/*
+ * Context switch.  Saves the outgoing task's non-volatile state on its
+ * kernel stack, disables FP in the saved MSR, updates PACACURRENT to
+ * the incoming task, then restores the incoming task's state and
+ * returns to its _switch caller.  r3 = old THREAD, r4 = new THREAD.
+ */
+_GLOBAL(_switch)
+ stdu r1,-INT_FRAME_SIZE(r1)
+ ld r6,0(r1)
+ std r6,GPR1(r1)
+ /* r3-r13 are caller saved -- Cort */
+ SAVE_GPR(2, r1)
+ SAVE_8GPRS(14, r1)
+ SAVE_10GPRS(22, r1)
+ mflr r20 /* Return to switch caller */
+ mfmsr r22
+ li r6,MSR_FP /* Disable floating-point */
+ andc r22,r22,r6
+ mtmsrd r22
+ isync
+ std r20,_NIP(r1)
+ std r22,_MSR(r1)
+ std r20,_LINK(r1)
+ mfcr r20
+ std r20,_CCR(r1)
+ li r6,0x0ff0 /* marker: frame saved by _switch */
+ std r6,TRAP(r1)
+ std r1,KSP(r3) /* Set old stack pointer */
+
+ mfspr r5,SPRG3 /* Get Paca */
+ /* XXX remove - Anton */
+ addi r3,r3,-THREAD /* old 'current' for return value */
+ addi r13,r4,-THREAD /* Convert THREAD to 'current' */
+ std r13,PACACURRENT(r5) /* Set new 'current' */
+
+#ifdef CONFIG_PPC_ISERIES
+#error fixme
+ ld r7,TI_FLAGS(r4) /* Get run light flag */
+ mfspr r9,CTRLF
+ srdi r7,r7,1 /* Align to run light bit in CTRL reg */
+ insrdi r9,r7,1,63 /* Insert run light into CTRL */
+ mtspr CTRLT,r9
+#endif
+ ld r1,KSP(r4) /* Load new stack pointer */
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+ /* r3-r13 are destroyed -- Cort */
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+
+ ld r7,_NIP(r1) /* Return to _switch caller in new task */
+ ld r1,GPR1(r1)
+ mtlr r7
+ blr
+
+/*
+ * First code run by a newly forked child: finish the scheduler's
+ * bookkeeping (SMP), report the syscall exit to the tracer if the
+ * child is being traced, then take the common exception return path.
+ */
+_GLOBAL(ret_from_fork)
+#ifdef CONFIG_SMP
+ bl .schedule_tail
+#endif
+ clrrdi r4,r1,THREAD_SHIFT
+ ld r4,TI_FLAGS(r4)
+ andi. r4,r4,_TIF_SYSCALL_TRACE
+ beq+ .ret_from_except
+ bl .do_syscall_trace
+ b .ret_from_except
+
+/*
+ * Common exception/interrupt/syscall return path.  On iSeries, first
+ * drains any pending soft-disabled interrupts.  Then, with interrupts
+ * hard-disabled, checks thread_info flags when returning to user mode
+ * and loops through do_work (reschedule / signal delivery) until no
+ * work remains; finally restores the full register frame and rfid's.
+ */
+_GLOBAL(ret_from_except)
+#ifdef CONFIG_PPC_ISERIES
+ ld r5,SOFTE(r1)
+ cmpdi 0,r5,0
+ beq 4f
+irq_recheck:
+ /* Check for pending interrupts (iSeries) */
+ CHECKANYINT(r3,r4)
+ beq+ 4f /* skip do_IRQ if no interrupts */
+
+ mfspr r5,SPRG3
+ li r3,0
+ stb r3,PACAPROCENABLED(r5) /* ensure we are disabled */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_IRQ
+ b irq_recheck /* loop back and handle more */
+4:
+#endif
+ /*
+ * Disable interrupts so that current_thread_info()->flags
+ * can't change between when we test it and when we return
+ * from the interrupt.
+ */
+recheck:
+ mfmsr r10 /* Get current interrupt state */
+ li r4,0
+ ori r4,r4,MSR_EE|MSR_RI
+ andc r10,r10,r4 /* clear MSR_EE and MSR_RI */
+ mtmsrd r10 /* Update machine state */
+
+#ifdef CONFIG_PPC_ISERIES
+#error fix iSeries soft disable
+#endif
+
+ ld r3,_MSR(r1) /* Returning to user mode? */
+ andi. r3,r3,MSR_PR
+ beq restore /* if not, just restore regs and return */
+
+ /* Check current_thread_info()->flags */
+ clrrdi r3,r1,THREAD_SHIFT
+ ld r3,TI_FLAGS(r3)
+ andi. r0,r3,_TIF_USER_WORK_MASK
+ bne do_work
+
+ /* Returning to user: record the kernel stack for next entry. */
+ addi r0,r1,INT_FRAME_SIZE /* size of frame */
+ std r0,THREAD+KSP(r13) /* save kernel stack pointer */
+ mfspr r4,SPRG3 /* current task's PACA */
+ std r1,PACAKSAVE(r4) /* save exception stack pointer */
+
+restore:
+ ld r3,_CTR(r1)
+ ld r0,_LINK(r1)
+ mtctr r3
+ mtlr r0
+ ld r3,_XER(r1)
+ mtspr XER,r3
+ REST_8GPRS(5, r1)
+ REST_10GPRS(14, r1)
+ REST_8GPRS(24, r1)
+
+ stdcx. r0,0,r1 /* to clear the reservation */
+
+#ifdef DO_SOFT_DISABLE
+ ld r0,SOFTE(r1)
+ stb r0,PACAPROCENABLED(r13)
+#endif
+
+ ld r0,_MSR(r1)
+ mtspr SRR1,r0
+ ld r2,_CCR(r1)
+ mtcrf 0xFF,r2
+ ld r2,_NIP(r1)
+ mtspr SRR0,r2
+ REST_GPR(13,r1)
+ ld r0,GPR0(r1)
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r1,GPR1(r1)
+
+ rfid
+
+do_work:
+ /* Enable interrupts */
+ ori r10,r10,MSR_EE|MSR_RI
+ mtmsrd r10
+
+ andi. r0,r3,_TIF_NEED_RESCHED
+ beq 1f
+ bl .schedule
+ b recheck
+
+1: andi. r0,r3,_TIF_SIGPENDING
+ beq 2f
+ li r3,0
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ bl .do_signal
+ b recheck
+
+2: /* nobody uses the TIF_NOTIFY_RESUME bit yet */
+ b recheck
+
+/*
+ * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
+ * called with the MMU off.
+ *
+ * In addition, we need to be in 32b mode, at least for now.
+ *
+ * Note: r3 is an input parameter to rtas, so don't trash it...
+ */
+/*
+ * Call into RTAS.  RTAS runs in 32-bit real mode, so all non-volatile
+ * state it could clobber is saved to the stack, the SP and MSR are
+ * stashed in the PACA, and a relocated (real-mode) return address is
+ * set up before rfid'ing into the firmware with MMU and 64-bit mode
+ * stripped from the MSR.  r3 carries the RTAS argument buffer.
+ */
+_GLOBAL(enter_rtas)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
+
+ /* Because RTAS is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
+ */
+ SAVE_GPR(2, r1) /* Save the TOC */
+ SAVE_GPR(13, r1) /* Save current */
+ SAVE_8GPRS(14, r1) /* Save the non-volatiles */
+ SAVE_10GPRS(22, r1) /* ditto */
+
+ mfcr r4
+ std r4,_CCR(r1)
+ mfctr r5
+ std r5,_CTR(r1)
+ mfspr r6,XER
+ std r6,_XER(r1)
+ mfdar r7
+ std r7,_DAR(r1)
+ mfdsisr r8
+ std r8,_DSISR(r1)
+ mfsrr0 r9
+ std r9,_SRR0(r1)
+ mfsrr1 r10
+ std r10,_SRR1(r1)
+
+ /* Unfortunatly, the stack pointer and the MSR are also clobbered,
+ * so they are saved in the PACA (SPRG3) which allows us to restore
+ * our original state after RTAS returns.
+ */
+ mfspr r4,SPRG3 /* Get PACA */
+ std r1,PACAR1(r4)
+ mfmsr r6
+ std r6,PACASAVEDMSR(r4)
+
+ /* Setup our real return addr */
+ SET_REG_TO_LABEL(r4,.rtas_return_loc)
+ SET_REG_TO_CONST(r9,KERNELBASE)
+ sub r4,r4,r9 /* convert to a real-mode address */
+ mtlr r4
+
+ /* r0: current MSR minus EE/SE/BE/RI - interrupts off for rfid */
+ li r0,0
+ ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
+ andc r0,r6,r0
+
+ /* r6: MSR for RTAS - additionally drop SF (64-bit), IR/DR (MMU)
+ * and FP bits */
+ li r9,1
+ rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
+ ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
+ andc r6,r0,r9
+ sync /* disable interrupts so SRR0/1 */
+ mtmsrd r0 /* don't get trashed */
+
+ SET_REG_TO_LABEL(r4,rtas)
+ ld r5,RTASENTRY(r4) /* get the rtas->entry value */
+ ld r4,RTASBASE(r4) /* get the rtas->base value */
+
+ mtspr SRR0,r5
+ mtspr SRR1,r6
+ rfid
+
+/* RTAS returns here (via the LR set up in enter_rtas), still in real
+ * (untranslated) 32-bit mode; all addresses must be adjusted by
+ * -KERNELBASE until the rfid below turns relocation back on. */
+_STATIC(rtas_return_loc)
+	/* relocation is off at this point */
+	mfspr	r4,SPRG3	        /* Get PACA */
+	SET_REG_TO_CONST(r5, KERNELBASE)
+        sub     r4,r4,r5                /* RELOC the PACA base pointer */
+
+	ld	r1,PACAR1(r4)           /* Restore our SP */
+	LOADADDR(r3,.rtas_restore_regs)
+	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */
+
+	/* rfid to rtas_restore_regs with our pre-RTAS MSR (relocation on) */
+	mtspr	SRR0,r3
+	mtspr	SRR1,r4
+	rfid
+
+/* Final leg of the RTAS call: undo everything enter_rtas saved and
+ * return to enter_rtas's caller. */
+_STATIC(rtas_restore_regs)
+	/* relocation is on at this point */
+	REST_GPR(2, r1)			/* Restore the TOC */
+	REST_GPR(13, r1)		/* Restore current */
+	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
+	REST_10GPRS(22, r1)		/* ditto */
+
+	/* put back current in r13 */
+	mfspr	r4,SPRG3
+	ld	r13,PACACURRENT(r4)
+
+	ld	r4,_CCR(r1)
+	mtcr	r4
+	ld	r5,_CTR(r1)
+	mtctr	r5
+	ld	r6,_XER(r1)
+	mtspr	XER,r6
+	ld	r7,_DAR(r1)
+	mtdar	r7
+	ld	r8,_DSISR(r1)
+	mtdsisr	r8
+	ld	r9,_SRR0(r1)
+	mtsrr0	r9
+	ld	r10,_SRR1(r1)
+	mtsrr1	r10
+
+        addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
+	ld	r0,16(r1)		/* get return address */
+
+	mtlr    r0
+        blr				/* return to caller */
+
+/*
+ * Call into Open Firmware (the "prom").  Mirrors enter_rtas: save
+ * everything the 32-bit firmware may clobber, drop to 32-bit mode,
+ * call prom->entry via blrl, then restore our full 64-bit state.
+ */
+_GLOBAL(enter_prom)
+	mflr	r0
+	std	r0,16(r1)
+	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
+
+	/* Because PROM is running in 32b mode, it clobbers the high order half
+	 * of all registers that it saves.  We therefore save those registers
+	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
+	 */
+	SAVE_8GPRS(2, r1)		/* Save the TOC & incoming param(s) */
+	SAVE_GPR(13, r1)		/* Save current */
+	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
+	SAVE_10GPRS(22, r1)		/* ditto */
+
+	/* Save the special-purpose registers PROM may clobber */
+	mfcr	r4
+	std	r4,_CCR(r1)
+	mfctr	r5
+	std	r5,_CTR(r1)
+	mfspr	r6,XER
+	std	r6,_XER(r1)
+	mfdar	r7
+	std	r7,_DAR(r1)
+	mfdsisr	r8
+	std	r8,_DSISR(r1)
+	mfsrr0	r9
+	std	r9,_SRR0(r1)
+	mfsrr1	r10
+	std	r10,_SRR1(r1)
+	mfmsr	r11
+	std	r11,_MSR(r1)
+
+	/* Unfortunately, the stack pointer is also clobbered, so it is saved
+	 * in the SPRG2 which allows us to restore our original state after
+	 * PROM returns.
+	 */
+	mtspr	SPRG2,r1
+
+	/* put a relocation offset into r3 */
+	bl	.reloc_offset
+	LOADADDR(r12,prom)
+	sub	r12,r12,r3		/* convert to a physical address */
+	ld	r12,PROMENTRY(r12)	/* get the prom->entry value */
+	mtlr	r12
+
+	/* Clear MSR_SF and MSR_ISF: run (and take interrupts) in 32b mode */
+	mfmsr	r11			/* grab the current MSR */
+	li	r12,1
+	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
+	andc	r11,r11,r12
+	li	r12,1
+	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
+	andc	r11,r11,r12
+	mtmsrd	r11
+	isync
+
+	/* Restore the argument registers just before the call */
+	REST_8GPRS(2, r1)		/* Restore the TOC & param(s) */
+	REST_GPR(13, r1)		/* Restore current */
+	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
+	REST_10GPRS(22, r1)		/* ditto */
+	blrl				/* Entering PROM here... */
+
+	mfspr	r1,SPRG2		/* Restore the stack pointer */
+	ld	r6,_MSR(r1)		/* Restore the MSR */
+	mtmsrd	r6
+	isync
+
+	REST_GPR(2, r1)			/* Restore the TOC */
+	REST_GPR(13, r1)		/* Restore current */
+	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
+	REST_10GPRS(22, r1)		/* ditto */
+
+	ld	r4,_CCR(r1)
+	mtcr	r4
+	ld	r5,_CTR(r1)
+	mtctr	r5
+	ld	r6,_XER(r1)
+	mtspr	XER,r6
+	ld	r7,_DAR(r1)
+	mtdar	r7
+	ld	r8,_DSISR(r1)
+	mtdsisr	r8
+	ld	r9,_SRR0(r1)
+	mtsrr0	r9
+	ld	r10,_SRR1(r1)
+	mtsrr1	r10
+	addi	r1,r1,PROM_FRAME_SIZE
+	ld	r0,16(r1)		/* get return address */
+
+	mtlr    r0
+        blr				/* return to caller */
--- /dev/null
+/*
+ * arch/ppc64/kernel/head.S
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
+ *
+ * This file contains the low-level support and setup for the
+ * PowerPC-64 platform, including trap and interrupt dispatch.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define SECONDARY_PROCESSORS
+
+#include "ppc_asm.h"
+#include "ppc_defs.h"
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <linux/config.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#define DO_SOFT_DISABLE
+#endif
+
+/*
+ * We layout physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x3fff : Interrupt support
+ * 0x4000 - 0x4fff : NACA
+ * 0x5000 - 0x5fff : Initial segment table
+ * 0x6000 : iSeries and common interrupt prologs
+ */
+
+/*
+ * SPRG Usage
+ *
+ * Register Definition
+ *
+ * SPRG0 reserved for hypervisor
+ * SPRG1 temp - used to save gpr
+ * SPRG2 temp - used to save gpr
+ * SPRG3 virt addr of Paca
+ */
+
+/*
+ * Entering into this code we make the following assumptions:
+ * For pSeries:
+ * 1. The MMU is off & open firmware is running in real mode.
+ * 2. The kernel is entered at __start
+ *
+ * For iSeries:
+ * 1. The MMU is on (as it always is for iSeries)
+ * 2. The kernel is entered at SystemReset_Iseries
+ */
+
+/*
+ * Start of kernel text.  Everything from here to offset 0x60 sits at
+ * architecturally/ABI fixed physical offsets: the hypervisor and the
+ * iSeries debugger read the pointers below directly, so do not move them.
+ */
+	.text
+	.globl  _stext
+_stext:
+_STATIC(__start)
+	b	.__start_initialization_pSeries
+
+	/* At offset 0x20, there is a pointer to iSeries LPAR data.
+	 * This is required by the hypervisor */
+	. = 0x20
+	.llong hvReleaseData-KERNELBASE
+
+	/* At offset 0x28 and 0x30 are offsets to the msChunks
+	 * array (used by the iSeries LPAR debugger to do translation
+	 * between physical addresses and absolute addresses) and
+	 * to the pidhash table (also used by the debugger) */
+	.llong msChunks-KERNELBASE
+	.llong pidhash-KERNELBASE
+
+	/* Offset 0x38 - Pointer to start of embedded System.map */
+	.globl	embedded_sysmap_start
+embedded_sysmap_start:
+	.llong	0
+	/* Offset 0x40 - Pointer to end of embedded System.map */
+	.globl	embedded_sysmap_end
+embedded_sysmap_end:
+	.llong	0
+
+	/* Secondary processors spin on this value until it goes to 1. */
+	.globl  __secondary_hold_spinloop
+__secondary_hold_spinloop:
+	.llong	0x0
+
+	/* Secondary processors write this value with their cpu # */
+	/* after they enter the spin loop immediately below.      */
+	.globl	__secondary_hold_acknowledge
+__secondary_hold_acknowledge:
+	.llong	0x0
+
+	. = 0x60
+/*
+ * The following code is used on pSeries to hold secondary processors
+ * in a spin loop after they have been freed from OpenFirmware, but
+ * before the bulk of the kernel has been relocated.  This code
+ * is relocated to physical address 0x60 before prom_init is run.
+ * All of it must fit below the first exception vector at 0x100.
+ */
+_GLOBAL(__secondary_hold)
+	/* Grab our linux cpu number */
+	mr	r24,r3
+
+	/* Tell the master cpu we're here */
+	/* Relocation is off & we are located at an address less */
+	/* than 0x100, so only need to grab low order offset.    */
+	std	r24,__secondary_hold_acknowledge@l(0)
+
+	/* All secondary cpu's wait here until told to start. */
+100:	ld	r4,__secondary_hold_spinloop@l(0)
+	cmpdi	0,r4,1
+	bne	100b
+
+#ifdef CONFIG_HMT
+	b	.hmt_init
+#else
+#ifdef CONFIG_SMP
+	mr	r3,r24
+	b	.pseries_secondary_smp_init
+#else
+	BUG_OPCODE	/* not SMP and not HMT: a secondary should not exist */
+#endif
+#endif
+
+/*
+ * The following macros define the code that appears as
+ * the prologue to each of the exception handlers. They
+ * are split into two parts to allow a single kernel binary
+ * to be used for pSeries, and iSeries.
+ */
+
+/*
+ * We make as much of the exception code common between native Pseries
+ * and Iseries LPAR implementations as possible.
+ */
+
+/*
+ * This is the start of the interrupt handlers for Pseries
+ * This code runs with relocation off.
+ */
+#define EX_SRR0 0
+#define EX_SRR1 8
+#define EX_R20 16
+#define EX_R21 24
+#define EX_R22 32
+#define EX_R23 40
+#define EX_DAR 48
+#define EX_DSISR 56
+
+#define EXCEPTION_PROLOG_PSERIES(label) \
+ mtspr SPRG2,r20; /* use SPRG2 as scratch reg */ \
+ mtspr SPRG1,r21; /* save r21 */ \
+ mfspr r20,SPRG3; /* get Paca virt addr */ \
+ ld r21,PACAEXCSP(r20); /* get exception stack ptr */ \
+ addi r21,r21,EXC_FRAME_SIZE; /* make exception frame */ \
+ std r22,EX_R22(r21); /* Save r22 in exc. frame */ \
+ std r23,EX_R23(r21); /* Save r23 in exc. frame */ \
+ mfspr r22,SRR0; /* EA of interrupted instr */ \
+ std r22,EX_SRR0(r21); /* Save SRR0 in exc. frame */ \
+ mfspr r23,SRR1; /* machine state at interrupt */ \
+ std r23,EX_SRR1(r21); /* Save SRR1 in exc. frame */ \
+ clrrdi r22,r20,60; /* Get 0xc part of the vaddr */ \
+ ori r22,r22,(label)@l; /* add in the vaddr offset */ \
+ /* assumes *_common < 16b */ \
+ mfmsr r23; \
+ rotldi r23,r23,4; \
+ ori r23,r23,0x30B; /* Set IR, DR, SF, ISF, HV */ \
+ rotldi r23,r23,60; /* for generic handlers */ \
+ mtspr SRR0,r22; \
+ mtspr SRR1,r23; \
+ mfcr r23; /* save CR in r23 */ \
+ rfid
+
+/*
+ * This is the start of the interrupt handlers for i_series
+ * This code runs with relocation on.
+ */
+#define EXCEPTION_PROLOG_ISERIES \
+ mtspr SPRG2,r20; /* use SPRG2 as scratch reg */\
+ mtspr SPRG1,r21; /* save r21 */\
+ mfspr r20,SPRG3; /* get Paca */\
+ ld r21,PACAEXCSP(r20); /* get exception stack ptr */\
+ addi r21,r21,EXC_FRAME_SIZE; /* make exception frame */\
+ std r22,EX_R22(r21); /* save r22 on exception frame */\
+ std r23,EX_R23(r21); /* Save r23 in exc. frame */\
+ ld r22,LPPACA+LPPACASRR0(r20); /* Get SRR0 from ItLpPaca */\
+ std r22,EX_SRR0(r21); /* save SRR0 in exc. frame */\
+ ld r23,LPPACA+LPPACASRR1(r20); /* Get SRR1 from ItLpPaca */\
+ std r23,EX_SRR1(r21); /* save SRR1 in exc. frame */\
+ mfcr r23; /* save CR in r23 */
+
+/*
+ * The common exception prolog is used for all except a few exceptions
+ * such as a segment miss on a kernel address. We have to be prepared
+ * to take another exception from the point where we first touch the
+ * kernel stack onwards.
+ *
+ * On entry r20 points to the paca and r21 points to the exception
+ * frame on entry, r23 contains the saved CR, and relocation is on.
+ */
+#define EXCEPTION_PROLOG_COMMON \
+ mfspr r22,SPRG2; /* Save r20 in exc. frame */ \
+ std r22,EX_R20(r21); \
+ mfspr r22,SPRG1; /* Save r21 in exc. frame */ \
+ std r22,EX_R21(r21); \
+ mfspr r22,DAR; /* Save DAR in exc. frame */ \
+ std r22,EX_DAR(r21); \
+ std r21,PACAEXCSP(r20); /* update exception stack ptr */ \
+ /* iff no protection flt */ \
+ mfspr r22,DSISR; /* Save DSISR in exc. frame */ \
+ std r22,EX_DSISR(r21); \
+ ld r22,EX_SRR1(r21); /* Get SRR1 from exc. frame */ \
+ andi. r22,r22,MSR_PR; /* Set CR for later branch */ \
+ mr r22,r1; /* Save r1 */ \
+ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
+ beq- 1f; \
+ ld r1,PACAKSAVE(r20); /* kernel stack to use */ \
+1: std r22,GPR1(r1); /* save r1 in stackframe */ \
+ std r22,0(r1); /* make stack chain pointer */ \
+ std r23,_CCR(r1); /* save CR in stackframe */ \
+ ld r22,EX_R20(r21); /* move r20 to stackframe */ \
+ std r22,GPR20(r1); \
+ ld r23,EX_R21(r21); /* move r21 to stackframe */ \
+ std r23,GPR21(r1); \
+ ld r22,EX_R22(r21); /* move r22 to stackframe */ \
+ std r22,GPR22(r1); \
+ ld r23,EX_R23(r21); /* move r23 to stackframe */ \
+ std r23,GPR23(r1); \
+ mflr r22; /* save LR in stackframe */ \
+ std r22,_LINK(r1); \
+ mfctr r23; /* save CTR in stackframe */ \
+ std r23,_CTR(r1); \
+ mfspr r22,XER; /* save XER in stackframe */ \
+ std r22,_XER(r1); \
+ ld r23,EX_DAR(r21); /* move DAR to stackframe */ \
+ std r23,_DAR(r1); \
+ ld r22,EX_DSISR(r21); /* move DSISR to stackframe */ \
+ std r22,_DSISR(r1); \
+ lbz r22,PACAPROCENABLED(r20); \
+ std r22,SOFTE(r1); \
+ ld r22,EX_SRR0(r21); /* get SRR0 from exc. frame */ \
+ ld r23,EX_SRR1(r21); /* get SRR1 from exc. frame */ \
+ addi r21,r21,-EXC_FRAME_SIZE;/* pop off exception frame */ \
+ std r21,PACAEXCSP(r20); \
+ SAVE_GPR(0, r1); /* save r0 in stackframe */ \
+ SAVE_8GPRS(2, r1); /* save r2 - r13 in stackframe */ \
+ SAVE_4GPRS(10, r1); \
+ ld r2,PACATOC(r20); \
+ ld r13,PACACURRENT(r20)
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r1, r22 (SRR0), and r23 (SRR1).
+ */
+
+/*
+ * Exception vectors.
+ */
+#define STD_EXCEPTION_PSERIES(n, label ) \
+ . = n; \
+ .globl label##_Pseries; \
+label##_Pseries: \
+ EXCEPTION_PROLOG_PSERIES( label##_common )
+
+#define STD_EXCEPTION_ISERIES( label ) \
+ .globl label##_Iseries; \
+label##_Iseries: \
+ EXCEPTION_PROLOG_ISERIES; \
+ b label##_common
+
+#define MASKABLE_EXCEPTION_ISERIES( label ) \
+ .globl label##_Iseries; \
+label##_Iseries: \
+ EXCEPTION_PROLOG_ISERIES; \
+ lbz r22,PACAPROFENABLED(r20); \
+ cmpi 0,r22,0; \
+ bne- label##_Iseries_profile; \
+label##_Iseries_prof_ret: \
+ lbz r22,PACAPROCENABLED(r20); \
+ cmpi 0,r22,0; \
+ beq- label##_Iseries_masked; \
+ b label##_common; \
+label##_Iseries_profile: \
+ std r24,48(r21); \
+ std r25,56(r21); \
+ mflr r24; \
+ bl do_profile; \
+ mtlr r24; \
+ ld r24,48(r21); \
+ ld r25,56(r21); \
+ b label##_Iseries_prof_ret
+
+#define STD_EXCEPTION_COMMON( trap, label, hdlr ) \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ li r20,0; \
+ li r6,trap; \
+ bl .save_remaining_regs; \
+ bl hdlr; \
+ b .ret_from_except
+
+/*
+ * Start of pSeries system interrupt routines
+ */
+ . = 0x100
+ .globl __start_interupts
+__start_interupts:
+
+ STD_EXCEPTION_PSERIES( 0x100, SystemReset )
+ STD_EXCEPTION_PSERIES( 0x200, MachineCheck )
+ STD_EXCEPTION_PSERIES( 0x300, DataAccess )
+ STD_EXCEPTION_PSERIES( 0x380, DataAccessSLB )
+ STD_EXCEPTION_PSERIES( 0x400, InstructionAccess )
+ STD_EXCEPTION_PSERIES( 0x480, InstructionAccessSLB )
+ STD_EXCEPTION_PSERIES( 0x500, HardwareInterrupt )
+ STD_EXCEPTION_PSERIES( 0x600, Alignment )
+ STD_EXCEPTION_PSERIES( 0x700, ProgramCheck )
+ STD_EXCEPTION_PSERIES( 0x800, FPUnavailable )
+ STD_EXCEPTION_PSERIES( 0x900, Decrementer )
+ STD_EXCEPTION_PSERIES( 0xa00, Trap_0a )
+ STD_EXCEPTION_PSERIES( 0xb00, Trap_0b )
+ STD_EXCEPTION_PSERIES( 0xc00, SystemCall )
+ STD_EXCEPTION_PSERIES( 0xd00, SingleStep )
+ STD_EXCEPTION_PSERIES( 0xe00, Trap_0e )
+ STD_EXCEPTION_PSERIES( 0xf00, PerformanceMonitor )
+ STD_EXCEPTION_PSERIES( 0x1300, InstructionBreakpoint )
+
+ . = 0x4000
+ .globl __end_interupts
+ .globl __start_naca
+__end_interupts:
+__start_naca:
+ /* Save space for naca.
+ * The first dword of the Naca is required by iSeries LPAR to
+ * point to itVpdAreas. On pSeries native, this value is not used.
+ */
+ .llong itVpdAreas
+ .llong 0x0
+ .llong 0x0
+ .llong xPaca
+
+ /*
+ * Space for the initial segment table
+ * For LPAR, the hypervisor must fill in at least one entry
+ * before we get control (with relocate on)
+ */
+
+ . = 0x5000
+ .globl __end_naca
+ .globl __start_stab
+__end_naca:
+__start_stab:
+
+
+	. = 0x6000
+	.globl __end_stab
+__end_stab:
+
+	/*
+	 * The iSeries LPAR map is at this fixed address
+	 * so that the HvReleaseData structure can address
+	 * it with a 32-bit offset.
+	 *
+	 * The VSID values below are dependent on the
+	 * VSID generation algorithm.  See include/asm/mmu_context.h.
+	 *
+	 * Layout: counts/reserved header, then one ESID->VSID mapping
+	 * and one physical memory range for the hypervisor to install
+	 * before giving us control.
+	 */
+
+	.llong	1		/* # ESIDs to be mapped by hypervisor       */
+	.llong	1		/* # memory ranges to be mapped by hypervisor */
+	.llong	5		/* Page # of segment table within load area */
+	.llong	0		/* Reserved */
+	.llong	0		/* Reserved */
+	.llong	0		/* Reserved */
+	.llong	0		/* Reserved */
+	.llong	0		/* Reserved */
+	.llong	0x0c00000000	/* ESID to map (Kernel at EA = 0xC000000000000000) */
+	.llong	0x06a99b4b14	/* VSID to map (Kernel at VA = 0x6a99b4b140000000) */
+	.llong	8192		/* # pages to map (32 MB) */
+	.llong	0		/* Offset from start of loadarea to start of map */
+	.llong	0x0006a99b4b140000	/* VPN of first page to map */
+
+	. = 0x6100
+
+/*** ISeries-LPAR interrupt handlers ***/
+
+ STD_EXCEPTION_ISERIES( MachineCheck )
+ STD_EXCEPTION_ISERIES( DataAccess )
+ STD_EXCEPTION_ISERIES( DataAccessSLB )
+ STD_EXCEPTION_ISERIES( InstructionAccess )
+ STD_EXCEPTION_ISERIES( InstructionAccessSLB )
+ MASKABLE_EXCEPTION_ISERIES( HardwareInterrupt )
+ STD_EXCEPTION_ISERIES( Alignment )
+ STD_EXCEPTION_ISERIES( ProgramCheck )
+ STD_EXCEPTION_ISERIES( FPUnavailable )
+ MASKABLE_EXCEPTION_ISERIES( Decrementer )
+ STD_EXCEPTION_ISERIES( Trap_0a )
+ STD_EXCEPTION_ISERIES( Trap_0b )
+ STD_EXCEPTION_ISERIES( SystemCall )
+ STD_EXCEPTION_ISERIES( SingleStep )
+ STD_EXCEPTION_ISERIES( Trap_0e )
+ STD_EXCEPTION_ISERIES( PerformanceMonitor )
+
+/*
+ * iSeries entry point (the hypervisor enters the kernel here, MMU on).
+ * Processor 0 proceeds to kernel initialization; all other processors
+ * turn off their run light and spin making harmless/yield hypervisor
+ * calls until released (SMP) or forever (non-SMP).
+ */
+	.globl SystemReset_Iseries
+SystemReset_Iseries:
+	mfspr	r25,SPRG3		/* Get Paca address (was bare "25") */
+	lhz	r24,PACAPACAINDEX(r25)	/* Get processor # */
+	cmpi	0,r24,0			/* Are we processor 0? */
+	beq	.__start_initialization_iSeries	/* Start up the first processor */
+	mfspr	r4,CTRLF
+	li	r5,RUNLATCH		/* Turn off the run light */
+	andc	r4,r4,r5
+	mtspr	CTRLT,r4
+
+1:
+	HMT_LOW
+#ifdef CONFIG_SMP
+	lbz	r23,PACAPROCSTART(r25)	/* Test if this processor
+					 * should start */
+	sync
+	LOADADDR(r3,current_set)
+	sldi	r28,r24,3		/* get current_set[cpu#] */
+	ldx	r3,r3,r28
+	addi	r1,r3,THREAD_SIZE
+	subi	r1,r1,STACK_FRAME_OVERHEAD
+
+	cmpi	0,r23,0
+	beq	iseries_secondary_smp_loop	/* Loop until told to go */
+#ifdef SECONDARY_PROCESSORS
+	bne	.__secondary_start		/* Loop until told to go */
+#endif
+iseries_secondary_smp_loop:
+	/* Let the Hypervisor know we are alive */
+	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+	lis	r3,0x8002
+	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
+#else /* CONFIG_SMP */
+	/* Yield the processor.  This is required for non-SMP kernels
+	   which are running on multi-threaded machines. */
+	lis	r3,0x8000
+	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
+	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
+	li	r4,0			/* "yield timed" */
+	li	r5,-1			/* "yield forever" */
+#endif /* CONFIG_SMP */
+	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
+	sc				/* Invoke the hypervisor via a system call */
+	mfspr	r25,SPRG3		/* hv call may clobber r25; reload PACA */
+	b	1b			/* If SMP not configured, secondaries
+					 * loop forever */
+
+/*
+ * Targets of MASKABLE_EXCEPTION_ISERIES when interrupts are soft-
+ * disabled: record the event (for the decrementer, note the pending
+ * tick in the lppaca and rearm DEC) and return without handling it.
+ */
+	.globl HardwareInterrupt_Iseries_masked
+HardwareInterrupt_Iseries_masked:
+	b	maskable_exception_exit
+
+	.globl Decrementer_Iseries_masked
+Decrementer_Iseries_masked:
+	li	r22,1
+	stb	r22,PACALPPACA+LPPACADECRINT(r20)
+	lwz	r22,PACADEFAULTDECR(r20)
+	mtspr	DEC,r22
+maskable_exception_exit:
+	mtcrf	0xff,r23		/* Restore regs and free exception frame */
+	ld	r22,EX_SRR0(r21)
+	ld	r23,EX_SRR1(r21)
+	mtspr	SRR0,r22
+	mtspr	SRR1,r23
+	ld	r22,EX_R22(r21)
+	ld	r23,EX_R23(r21)
+	mfspr	r21,SPRG1
+	mfspr	r20,SPRG2
+	rfid
+
+/*** Common interrupt handlers ***/
+
+ STD_EXCEPTION_COMMON( 0x100, SystemReset, .SystemResetException )
+ STD_EXCEPTION_COMMON( 0x200, MachineCheck, .MachineCheckException )
+ STD_EXCEPTION_COMMON( 0x900, Decrementer, .timer_interrupt )
+ STD_EXCEPTION_COMMON( 0xa00, Trap_0a, .UnknownException )
+ STD_EXCEPTION_COMMON( 0xb00, Trap_0b, .UnknownException )
+ STD_EXCEPTION_COMMON( 0xd00, SingleStep, .SingleStepException )
+ STD_EXCEPTION_COMMON( 0xe00, Trap_0e, .UnknownException )
+ STD_EXCEPTION_COMMON( 0xf00, PerformanceMonitor, .PerformanceMonitorException )
+ STD_EXCEPTION_COMMON(0x1300, InstructionBreakpoint, .InstructionBreakpointException )
+
+/*
+ * Return from an exception which is handled without calling
+ * save_remaining_regs. The caller is assumed to have done
+ * EXCEPTION_PROLOG_COMMON.
+ */
+fast_exception_return:
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtspr XER,r6
+ REST_GPR(0, r1)
+ REST_8GPRS(2, r1)
+ REST_4GPRS(10, r1)
+ mtspr SRR1,r23
+ mtspr SRR0,r22
+ REST_4GPRS(20, r1)
+ ld r1,GPR1(r1)
+ rfid
+
+/*
+ * Here r20 points to the PACA, r21 to the exception frame,
+ * r23 contains the saved CR.
+ * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
+ */
+ .globl DataAccess_common
+DataAccess_common:
+ mfspr r22,DAR
+ srdi r22,r22,60
+ cmpi 0,r22,0xc
+
+ /* Segment fault on a bolted segment. Go off and map that segment. */
+ beq .do_stab_bolted
+stab_bolted_user_return:
+ EXCEPTION_PROLOG_COMMON
+ ld r3,_DSISR(r1)
+ andis. r0,r3,0xa450 /* weird error? */
+ bne 1f /* if not, try to put a PTE */
+ andis. r0,r3,0x0020 /* Is it a page table fault? */
+ rlwinm r4,r3,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
+ ld r3,_DAR(r1) /* into the hash table */
+
+ beq 2f /* If so handle it */
+ li r4,0x300 /* Trap number */
+ bl .do_stab_SI
+ b 1f
+
+2: bl .do_hash_page_DSI /* Try to handle as hpte fault */
+1:
+ ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef DO_SOFT_DISABLE
+ ld r20,SOFTE(r1) /* Copy saved SOFTE bit */
+#else
+ rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
+#endif
+ li r6,0x300
+ bl .save_remaining_regs
+ bl .do_page_fault
+ b .ret_from_except
+
+/* Data SLB miss (0x380): bolted-segment faults are mapped directly,
+ * otherwise allocate a segment table entry, and on failure fall back
+ * to do_page_fault. */
+	.globl DataAccessSLB_common
+DataAccessSLB_common:
+	mfspr	r22,DAR
+	srdi	r22,r22,60
+	cmpi	0,r22,0xc
+
+	/* Segment fault on a bolted segment.  Go off and map that segment. */
+	beq	.do_slb_bolted
+
+	EXCEPTION_PROLOG_COMMON
+	ld	r3,_DAR(r1)
+	li	r4,0x380		/* Exception vector  */
+	bl	.ste_allocate
+	or.	r3,r3,r3		/* Check return code */
+	beq	fast_exception_return	/* Return if we succeeded */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef DO_SOFT_DISABLE
+	ld	r20,SOFTE(r1)
+#else
+	rldicl	r20,r23,49,63		/* copy EE bit from saved MSR */
+#endif
+	li	r6,0x380
+	bl	.save_remaining_regs
+	bl	.do_page_fault
+	b	.ret_from_except
+
+/* Instruction storage interrupt (0x400): SRR1 (r23) carries the fault
+ * reason bits; try the segment table or hash table first, then fall
+ * back to do_page_fault. */
+	.globl InstructionAccess_common
+InstructionAccess_common:
+	EXCEPTION_PROLOG_COMMON
+
+	andis.	r0,r23,0x0020		/* no ste found? */
+	beq	2f
+	mr	r3,r22			/* SRR0 at interrupt */
+	li	r4,0x400		/* Trap number       */
+	bl	.do_stab_SI
+	b	1f
+
+2:	andis.	r0,r23,0x4000		/* no pte found? */
+	beq	1f			/* if so, try to put a PTE */
+	mr	r3,r22			/* into the hash table */
+	bl	.do_hash_page_ISI	/* Try to handle as hpte fault */
+1:
+	mr	r4,r22
+	mr	r5,r23
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef DO_SOFT_DISABLE
+	ld	r20,SOFTE(r1)
+#else
+	rldicl	r20,r23,49,63		/* copy EE bit from saved MSR */
+#endif
+	li	r6,0x400
+	bl	.save_remaining_regs
+	bl	.do_page_fault
+	b	.ret_from_except
+
+/* Instruction SLB miss (0x480): allocate a segment table entry for the
+ * faulting NIA; on failure fall back to do_page_fault. */
+	.globl InstructionAccessSLB_common
+InstructionAccessSLB_common:
+	EXCEPTION_PROLOG_COMMON
+	mr	r3,r22			/* SRR0 = NIA	*/
+	li	r4,0x480		/* Exception vector  */
+	bl	.ste_allocate
+	or.	r3,r3,r3		/* Check return code */
+	beq	fast_exception_return	/* Return if we succeeded */
+
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef DO_SOFT_DISABLE
+	ld	r20,SOFTE(r1)
+#else
+	rldicl	r20,r23,49,63		/* copy EE bit from saved MSR */
+#endif
+	li	r6,0x480		/* trap number (was 0x380: copy/paste
+					 * from DataAccessSLB_common; this is
+					 * the 0x480 vector, see r4 above) */
+	bl	.save_remaining_regs
+	bl	.do_page_fault
+	b	.ret_from_except
+
+/* External interrupt (0x500).  The first (outermost) do_IRQ invocation
+ * runs on a dedicated per-CPU hardware interrupt stack; nested ones
+ * stay on the current stack.  PACAHRDWINTCOUNT tracks the nesting. */
+	.globl HardwareInterrupt_common
+HardwareInterrupt_common:
+	EXCEPTION_PROLOG_COMMON
+HardwareInterrupt_entry:
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	li	r20,0
+	li	r6,0x500
+	bl	.save_remaining_regs
+	/* Determine if need to run do_irq on a hardware interrupt stack  */
+	/*   The first invocation of do_irq will occur on the kernel      */
+	/*   stack in the current stack                                   */
+	/*   All other invocations of do_irq will run on the hardware     */
+	/*   interrupt stack associated with the PACA of the current      */
+	/*   processor.                                                   */
+	/*                                                                */
+	/*  The call to do_irq will preserve the value of r14 - r31       */
+	/*                                                                */
+	mfspr	r20,SPRG3		/* get Paca                       */
+	lbz	r21,PACAHRDWINTCOUNT(r20) /* get hardware interrupt cnt   */
+	cmpi	0,r21,1			/*                                */
+	addi	r21,r21,1		/* incr hardware interrupt cnt    */
+	stb	r21,PACAHRDWINTCOUNT(r20) /*                              */
+	bne	2f			/* nested: stay on current stack  */
+
+	mr	r14,r1			/* preserve current r1            */
+	ld	r1,PACAHRDWINTSTACK(r20) /*                               */
+	std	r14,0(r1)		/* set the back chain             */
+	bl	.do_IRQ
+	lbz	r22,PACAHRDWINTCOUNT(r20) /* get hardware interrupt cnt   */
+	cmp	0,r22,r21		/* debug test                     */
+	bne	3f
+	subi	r21,r21,1
+	stb	r21,PACAHRDWINTCOUNT(r20) /*                              */
+	mr	r1,r14			/* switch back off the int stack  */
+	b	.ret_from_except
+
+2:
+	bl	.do_IRQ
+
+	lbz	r22,PACAHRDWINTCOUNT(r20) /* get hardware interrupt cnt   */
+	cmp	0,r22,r21		/* debug test                     */
+	bne	3f			/*                                */
+	subi	r21,r21,1		/* decr hardware interrupt cnt    */
+	stb	r21,PACAHRDWINTCOUNT(r20) /*                              */
+
+	b	.ret_from_except
+
+3:
+	/* error - counts out of sync: hang (optionally drop into xmon) */
+#ifdef CONFIG_XMON
+	bl	.xmon
+#endif
+4:	b	4b
+
+
+/* Alignment interrupt (0x600): dispatch to the C handler. */
+	.globl Alignment_common
+Alignment_common:
+	EXCEPTION_PROLOG_COMMON
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef DO_SOFT_DISABLE
+	ld	r20,SOFTE(r1)
+#else
+	rldicl	r20,r23,49,63		/* copy EE bit from saved MSR */
+#endif
+	li	r6,0x600
+	bl	.save_remaining_regs
+	bl	.AlignmentException
+	b	.ret_from_except
+
+/* Program check interrupt (0x700): dispatch to the C handler. */
+	.globl ProgramCheck_common
+ProgramCheck_common:
+	EXCEPTION_PROLOG_COMMON
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef DO_SOFT_DISABLE
+	ld	r20,SOFTE(r1)
+#else
+	rldicl	r20,r23,49,63		/* copy EE bit from saved MSR */
+#endif
+	li	r6,0x700
+	bl	.save_remaining_regs
+	bl	.ProgramCheckException
+	b	.ret_from_except
+
+/* FP unavailable (0x800): from user, lazily load the FP state; a
+ * kernel FP fault is a bug and is reported via KernelFP. */
+	.globl FPUnavailable_common
+FPUnavailable_common:
+	EXCEPTION_PROLOG_COMMON
+	bne	.load_up_fpu		/* if from user, just load it up */
+	li	r20,0
+	li	r6,0x800
+	bl	.save_remaining_regs	/* if from kernel, take a trap */
+	bl	.KernelFP
+	b	.ret_from_except
+
+/* System call (0xc00).  On iSeries, syscall number 0x5555 issued from
+ * kernel mode is a special hook to process pending interrupts via the
+ * hardware-interrupt path. */
+	.globl SystemCall_common
+SystemCall_common:
+	EXCEPTION_PROLOG_COMMON
+#ifdef CONFIG_PPC_ISERIES
+	cmpi	0,r0,0x5555		/* Special syscall to handle pending */
+	bne+	1f			/* interrupts */
+	andi.	r6,r23,MSR_PR		/* Only allowed from kernel */
+	beq+	HardwareInterrupt_entry
+1:
+#endif
+	std	r3,ORIG_GPR3(r1)	/* keep r3 for syscall restart */
+#ifdef DO_SOFT_DISABLE
+	ld	r20,SOFTE(r1)
+#else
+	rldicl	r20,r23,49,63		/* copy EE bit from saved MSR */
+#endif
+	li	r6,0xC00
+	bl	.save_remaining_regs
+	bl	.DoSyscall
+	b	.ret_from_except
+
+/*
+ * Try to resolve a fault by inserting an HPTE.  The _ISI entry is for
+ * instruction faults (no store bit); it falls through into _DSI after
+ * zeroing r4.  r3 = faulting address.  Returns to fast_exception_return
+ * on success; on failure returns (via saved LR) to the DSI/ISI handler.
+ */
+_GLOBAL(do_hash_page_ISI)
+	li	r4,0
+_GLOBAL(do_hash_page_DSI)
+	rlwimi	r4,r23,32-13,30,30	/* Insert MSR_PR as _PAGE_USER */
+	ori	r4,r4,1			/* add _PAGE_PRESENT */
+
+	mflr	r21			/* Save LR in r21 */
+
+#ifdef DO_SOFT_DISABLE
+	/*
+	 * We hard enable here (but first soft disable) so that the hash_page
+	 * code can spin on the hash_table_lock without problems on a shared
+	 * processor.
+	 */
+	li	r0,0
+	stb	r0,PACAPROCENABLED(r20)	/* Soft Disabled */
+
+	mfmsr	r0
+	ori	r0,r0,MSR_EE+MSR_RI
+	mtmsrd	r0			/* Hard Enable, RI on */
+#endif
+
+	/*
+	 * r3 contains the faulting address
+	 * r4 contains the required access permissions
+	 *
+	 * at return r3 = 0 for success
+	 */
+
+	bl	.hash_page		/* build HPTE if possible */
+
+#ifdef DO_SOFT_DISABLE
+	/*
+	 * Now go back to hard disabled.
+	 */
+	mfmsr	r0
+	li	r4,0
+	ori	r4,r4,MSR_EE+MSR_RI
+	andc	r0,r0,r4
+	mtmsrd	r0			/* Hard Disable, RI off */
+
+	ld	r0,SOFTE(r1)
+	cmpdi	0,r0,0			/* See if we will soft enable in */
+					/* save_remaining_regs           */
+	beq	5f
+	CHECKANYINT(r4,r5)
+	bne-	HardwareInterrupt_entry	/* Convert this DSI into an External */
+					/* to process interrupts which occurred */
+					/* during hash_page */
+5:
+	stb	r0,PACAPROCENABLED(r20)	/* Restore soft enable/disable status */
+#endif
+	or.	r3,r3,r3		/* Check return code */
+	beq	fast_exception_return	/* Return from exception on success */
+
+	mtlr	r21			/* restore LR */
+	blr				/* Return to DSI or ISI on failure */
+
+/*
+ * r20 points to the PACA, r21 to the exception frame,
+ * r23 contains the saved CR.
+ * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(do_stab_bolted)
+ std r23,EX_DAR(r21) /* save CR in exc. frame */
+
+ mfspr r22,DSISR
+ andis. r22,r22,0x0020
+ bne+ 2f
+ ld r22,8(r21) /* get SRR1 */
+ andi. r22,r22,MSR_PR /* check if from user */
+ bne+ stab_bolted_user_return /* from user, send the error on up */
+ li r3,0
+#ifdef CONFIG_XMON
+ bl .xmon
+#endif
+1: b 1b
+2:
+ /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
+ mfspr r21,DAR
+ rldicl r20,r21,36,32 /* Permits a full 32b of ESID */
+ rldicr r20,r20,15,48
+ rldicl r21,r21,4,60
+ or r20,r20,r21
+
+ li r21,9 /* VSID_RANDOMIZER */
+ sldi r21,r21,32
+ oris r21,r21,58231
+ ori r21,r21,39831
+
+ mulld r20,r20,r21
+ clrldi r20,r20,28 /* r20 = vsid */
+
+ mfsprg r21,3
+ ld r21,PACASTABVIRT(r21)
+
+ /* Hash to the primary group */
+ mfspr r22,DAR
+ rldicl r22,r22,36,59
+ rldicr r22,r22,7,56
+ or r21,r21,r22 /* r21 = first ste of the group */
+
+ /* Search the primary group for a free entry */
+ li r22,0
+1:
+ ld r23,0(r21) /* Test valid bit of the current ste */
+ rldicl r23,r23,57,63
+ cmpwi r23,0
+ bne 2f
+ ld r23,8(r21) /* Get the current vsid part of the ste */
+ rldimi r23,r20,12,0 /* Insert the new vsid value */
+ std r23,8(r21) /* Put new entry back into the stab */
+ eieio /* Order vsid update */
+ ld r23,0(r21) /* Get the esid part of the ste */
+ mfspr r20,DAR /* Get the new esid */
+ rldicl r20,r20,36,28 /* Permits a full 36b of ESID */
+ rldimi r23,r20,28,0 /* Insert the new esid value */
+ ori r23,r23,144 /* Turn on valid and kp */
+ std r23,0(r21) /* Put new entry back into the stab */
+ sync /* Order the update */
+ b 3f
+2:
+ addi r22,r22,1
+ addi r21,r21,16
+ cmpldi r22,7
+ ble 1b
+
+ /* Stick for only searching the primary group for now. */
+ /* At least for now, we use a very simple random castout scheme */
+ /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
+ mftb r22
+ andi. r22,r22,7
+ ori r22,r22,1
+ sldi r22,r22,4
+
+ /* r21 currently points to and ste one past the group of interest */
+ /* make it point to the randomly selected entry */
+ subi r21,r21,128
+ or r21,r21,r22 /* r21 is the entry to invalidate */
+
+ isync /* mark the entry invalid */
+ ld r23,0(r21)
+ li r22,-129
+ and r23,r23,r22
+ std r23,0(r21)
+ sync
+
+ ld r23,8(r21)
+ rldimi r23,r20,12,0
+ std r23,8(r21)
+ eieio
+
+ ld r23,0(r21) /* Get the esid part of the ste */
+ mr r22,r23
+ mfspr r20,DAR /* Get the new esid */
+ rldicl r20,r20,36,28 /* Permits a full 32b of ESID */
+ rldimi r23,r20,28,0 /* Insert the new esid value */
+ ori r23,r23,144 /* Turn on valid and kp */
+ std r23,0(r21) /* Put new entry back into the stab */
+
+ rldicl r22,r22,36,28
+ rldicr r22,r22,28,35
+ slbie r22
+ sync
+
+3:
+ /* All done -- return from exception. */
+ mfsprg r20,3 /* Load the PACA pointer */
+ ld r21,PACAEXCSP(r20) /* Get the exception frame pointer */
+ addi r21,r21,EXC_FRAME_SIZE
+ ld r23,EX_DAR(r21) /* get saved CR */
+ /* note that this is almost identical to maskable_exception_exit */
+ mtcr r23 /* restore CR */
+ ld r22,EX_SRR0(r21) /* Get SRR0 from exc. frame */
+ ld r23,EX_SRR1(r21) /* Get SRR1 from exc. frame */
+ mtspr SRR0,r22
+ mtspr SRR1,r23
+ ld r22,EX_R22(r21) /* restore r22 and r23 */
+ ld r23,EX_R23(r21)
+ mfspr r20,SPRG2
+ mfspr r21,SPRG1
+ rfid
+_TRACEBACK(do_stab_bolted)
+
+/*
+ * r20 points to the PACA, r21 to the exception frame,
+ * r23 contains the saved CR.
+ * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(do_slb_bolted)
+ std r23,48(r21) /* save CR in exc. frame */
+
+ /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
+ mfspr r21,DAR
+ rldicl r20,r21,36,32 /* Permits a full 32b of ESID */
+ rldicr r20,r20,15,48
+ rldicl r21,r21,4,60
+ or r20,r20,r21
+
+ li r21,9 /* VSID_RANDOMIZER */
+ sldi r21,r21,32
+ oris r21,r21,58231
+ ori r21,r21,39831
+
+ mulld r20,r20,r21
+ clrldi r20,r20,28 /* r20 = vsid */
+
+ /* Search the SLB for a free entry */
+ li r22,1
+1:
+ slbmfee r23,r22
+ rldicl r23,r23,37,63
+ cmpwi r23,0
+ beq 3f /* Found an invalid entry */
+
+ addi r22,r22,1
+ cmpldi r22,64
+ blt 1b
+
+ /* No free entry - just take the next entry, round-robin */
+ /* XXX we should get the number of SLB entries from the naca */
+SLB_NUM_ENTRIES = 64
+ mfspr r21,SPRG3
+ ld r22,PACASTABRR(r21)
+ addi r23,r22,1
+ cmpdi r23,SLB_NUM_ENTRIES
+ blt 2f
+ li r23,1
+2: std r23,PACASTABRR(r21)
+
+ /* r20 = vsid, r22 = entry */
+3:
+ /* Put together the vsid portion of the entry. */
+ li r21,0
+ rldimi r21,r20,12,0
+ ori r20,r21,1024
+#ifndef CONFIG_PPC_ISERIES
+ ori r20,r20,256 /* map kernel region with large ptes */
+#endif
+
+ /* Put together the esid portion of the entry. */
+ mfspr r21,DAR /* Get the new esid */
+ rldicl r21,r21,36,28 /* Permits a full 36b of ESID */
+ li r23,0
+ rldimi r23,r21,28,0 /* Insert esid */
+ oris r21,r23,2048 /* valid bit */
+ rldimi r21,r22,0,52 /* Insert entry */
+
+ isync
+ slbmte r20,r21
+ isync
+
+ /* All done -- return from exception. */
+ mfsprg r20,3 /* Load the PACA pointer */
+ ld r21,PACAEXCSP(r20) /* Get the exception frame pointer */
+ addi r21,r21,EXC_FRAME_SIZE
+ ld r23,EX_DAR(r21) /* get saved CR */
+ /* note that this is almost identical to maskable_exception_exit */
+ mtcr r23 /* restore CR */
+ ld r22,EX_SRR0(r21) /* Get SRR0 from exc. frame */
+ ld r23,EX_SRR1(r21) /* Get SRR1 from exc. frame */
+ mtspr SRR0,r22
+ mtspr SRR1,r23
+ ld r22,EX_R22(r21) /* restore r22 and r23 */
+ ld r23,EX_R23(r21)
+ mfspr r20,SPRG2
+ mfspr r21,SPRG1
+ rfid
+_TRACEBACK(do_slb_bolted)
+
+/*
+ * Segment-table miss: try to build an STE for the faulting address.
+ * On success, return straight from the exception; on failure, return
+ * to the caller so the normal DSI/ISI path can handle it.
+ */
+_GLOBAL(do_stab_SI)
+ mflr r21 /* Save LR in r21 */
+
+ /*
+ * r3 contains the faulting address
+ * r4 contains the required access permissions
+ *
+ * at return r3 = 0 for success
+ */
+
+ bl .ste_allocate /* build STE if possible */
+ or. r3,r3,r3 /* Check return code (sets CR0) */
+ beq fast_exception_return /* Return from exception on success */
+ mtlr r21 /* restore LR */
+ blr /* Return to DSI or ISI on failure */
+
+/*
+ * This code finishes saving the registers to the exception frame.
+ * Address translation is already on.
+ */
+_GLOBAL(save_remaining_regs)
+ /*
+ * Save the rest of the registers into the pt_regs structure
+ */
+ std r22,_NIP(r1)
+ std r23,_MSR(r1)
+ std r6,TRAP(r1)
+ ld r6,GPR6(r1)
+ SAVE_2GPRS(14, r1)
+ SAVE_4GPRS(16, r1)
+ SAVE_8GPRS(24, r1)
+
+ /*
+ * Clear the RESULT field
+ */
+ li r22,0
+ std r22,RESULT(r1)
+
+ /*
+ * Test if from user state; result will be tested later
+ * (CR0.eq set means we came from the kernel)
+ */
+ andi. r23,r23,MSR_PR /* Set CR for later branch */
+
+ /*
+ * Indicate that r1 contains the kernel stack and
+ * get the Kernel TOC and CURRENT pointers from the Paca
+ */
+ mfspr r23,SPRG3 /* Get PACA */
+ std r22,PACAKSAVE(r23) /* r1 is now kernel sp */
+ /* (note: r22 is still zero here, so PACAKSAVE is cleared, which
+ * marks r1 as already holding the kernel stack pointer) */
+ ld r2,PACATOC(r23) /* Get Kernel TOC pointer */
+
+ /*
+ * If from user state, update THREAD.regs
+ */
+ beq 2f /* Modify THREAD.regs if from user */
+ addi r24,r1,STACK_FRAME_OVERHEAD
+ std r24,THREAD+PT_REGS(r13)
+2:
+ SET_REG_TO_CONST(r22, MSR_KERNEL)
+
+#ifdef DO_SOFT_DISABLE
+ /* r20 carries the caller's desired interrupt-enable state
+ * (set before branching here - TODO confirm against callers) */
+ stb r20,PACAPROCENABLED(r23) /* possibly soft enable */
+ ori r22,r22,MSR_EE /* always hard enable */
+#else
+ rldimi r22,r20,15,48 /* Insert desired EE value */
+#endif
+
+ mtmsrd r22
+ blr
+
+
+/*
+ * Bump the kernel profiling bucket for the interrupted PC.
+ * r20 = PACA, r21 -> saved SRR0 (at 0(r21)) / SRR1 (at 8(r21)).
+ * Only kernel-mode samples are counted. Clobbers r22 and r25.
+ */
+do_profile:
+ ld r22,8(r21) /* Get SRR1 */
+ andi. r22,r22,MSR_PR /* Test if in kernel */
+ bnelr /* return if not in kernel */
+ ld r22,0(r21) /* Get SRR0 */
+ ld r25,PACAPROFSTEXT(r20) /* _stext */
+ subf r22,r25,r22 /* offset into kernel */
+ lwz r25,PACAPROFSHIFT(r20)
+ srd r22,r22,r25
+ lwz r25,PACAPROFLEN(r20) /* length of profile table (-1) */
+ cmp 0,r22,r25 /* off end? */
+ ble 1f
+ mr r22,r25 /* force into last entry */
+1: sldi r22,r22,2 /* convert to offset into buffer */
+ ld r25,PACAPROFBUFFER(r20) /* profile buffer */
+ add r25,r25,r22
+2: lwarx r22,0,r25 /* atomically increment */
+ addi r22,r22,1
+ stwcx. r22,0,r25
+ bne- 2b /* lost the reservation - retry */
+ blr
+
+
+/*
+ * On pSeries, secondary processors spin in the following code.
+ * At entry, r3 = this processor's number (in Linux terms, not hardware).
+ */
+_GLOBAL(pseries_secondary_smp_init)
+
+ /* turn on 64-bit mode */
+ bl .enable_64b_mode
+ isync
+
+ /* Set up a Paca value for this processor. */
+ LOADADDR(r24, xPaca) /* Get base vaddr of Paca array */
+ mulli r25,r3,PACA_SIZE /* Calculate vaddr of right Paca */
+ add r25,r25,r24 /* for this processor. */
+
+ mtspr SPRG3,r25 /* Save vaddr of Paca in SPRG3 */
+ mr r24,r3 /* __secondary_start needs cpu# */
+
+1:
+ HMT_LOW /* low thread priority while we poll */
+ lbz r23,PACAPROCSTART(r25) /* Test if this processor should */
+ /* start. */
+ sync
+
+ /* Create a temp kernel stack for use before relocation is on.
+ * Stack sits just above the PACA guard area. */
+ mr r1,r25
+ addi r1,r1,PACAGUARD
+ addi r1,r1,0x1000
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
+ cmpi 0,r23,0
+#ifdef CONFIG_SMP
+#ifdef SECONDARY_PROCESSORS
+ bne .__secondary_start
+#endif
+#endif
+ b 1b /* Loop until told to go */
+
+/*
+ * iSeries entry: set up an initial stack, the TOC and the naca
+ * pointer, then fall into the common startup path. Translation is
+ * already managed by the hypervisor, so no relocation fixups beyond
+ * klimit are needed here.
+ */
+_GLOBAL(__start_initialization_iSeries)
+
+ LOADADDR(r1,init_thread_union)
+ addi r1,r1,THREAD_SIZE
+ li r0,0
+ stdu r0,-STACK_FRAME_OVERHEAD(r1)
+
+ /* TOC = __toc_start + 0x8000, added in two halves because the
+ * addi immediate is only 16 bits (signed) */
+ LOADADDR(r2,__toc_start)
+ addi r2,r2,0x4000
+ addi r2,r2,0x4000
+
+ LOADADDR(r9,naca)
+ SET_REG_TO_CONST(r4, KERNELBASE)
+ addi r4,r4,0x4000 /* naca lives at KERNELBASE + 0x4000 */
+ std r4,0(r9) /* set the naca pointer */
+
+ /* Get the pointer to the segment table */
+ ld r6,PACA(r4) /* Get the base Paca pointer */
+ ld r4,PACASTABVIRT(r6)
+
+ bl .iSeries_fixup_klimit
+
+ b .start_here_common
+
+/*
+ * pSeries entry, running wherever the bootloader put us with
+ * relocation off. Stash the boot parameters, fix up the TOC and naca
+ * for the current load address, copy the first 0x100 bytes (the
+ * secondary spin code) to physical 0, then call prom_init.
+ */
+_GLOBAL(__start_initialization_pSeries)
+ mr r31,r3 /* save parameters */
+ mr r30,r4
+ mr r29,r5
+ mr r28,r6
+ mr r27,r7
+ mr r26,r8 /* YABOOT: debug_print() routine */
+ mr r25,r9 /* YABOOT: debug_delay() routine */
+ mr r24,r10 /* YABOOT: debug_prom() routine */
+
+ bl .enable_64b_mode
+
+ /* put a relocation offset into r3 */
+ bl .reloc_offset
+
+ /* TOC = __toc_start + 0x8000 (two 16-bit immediates) */
+ LOADADDR(r2,__toc_start)
+ addi r2,r2,0x4000
+ addi r2,r2,0x4000
+
+ /* Relocate the TOC from a virt addr to a real addr */
+ sub r2,r2,r3
+
+ /* setup the naca pointer which is needed by prom_init */
+ LOADADDR(r9,naca)
+ sub r9,r9,r3 /* addr of the variable naca */
+
+ SET_REG_TO_CONST(r4, KERNELBASE)
+ sub r4,r4,r3
+ addi r4,r4,0x4000
+ std r4,0(r9) /* set the value of naca */
+
+ /* DRENG / PPPBBB Fix the following comment!!! -Peter */
+ /* The following copies the first 0x100 bytes of code from the */
+ /* load addr to physical addr 0x0. This code causes secondary */
+ /* processors to spin until a flag in the PACA is set. This */
+ /* is done at this time rather than with the entire kernel */
+ /* relocation which is done below because we need to cause the */
+ /* processors to spin on code that is not going to move while OF */
+ /* is still alive. Although the spin code is not actually run on */
+ /* a uniprocessor, we always do this copy. */
+ SET_REG_TO_CONST(r4, KERNELBASE)/* Src addr */
+ sub r4,r4,r3 /* current address of __start */
+ /* the source addr */
+ li r3,0 /* Dest addr */
+ li r5,0x100 /* # bytes of memory to copy */
+ li r6,0 /* Destination offset */
+ bl .copy_and_flush /* copy the first 0x100 bytes */
+
+ /* restore the bootloader parameters for prom_init */
+ mr r3,r31
+ mr r4,r30
+ mr r5,r29
+ mr r6,r28
+ mr r7,r27
+ mr r8,r26
+ mr r9,r25
+ mr r10,r24
+
+ bl .prom_init
+
+ li r24,0 /* cpu # */
+
+/*
+ * At this point, r3 contains the physical address we are running at,
+ * returned by prom_init()
+ */
+_STATIC(__after_prom_start)
+
+/*
+ * We need to run with __start at physical address 0.
+ * This will leave some code in the first 256B of
+ * real memory, which are reserved for software use.
+ * The remainder of the first page is loaded with the fixed
+ * interrupt vectors. The next two pages are filled with
+ * unknown exception placeholders.
+ *
+ * Note: This process overwrites the OF exception vectors.
+ * r26 == relocation offset
+ * r27 == KERNELBASE
+ */
+ bl .reloc_offset
+ mr r26,r3
+ SET_REG_TO_CONST(r27,KERNELBASE)
+
+ li r3,0 /* target addr */
+
+ sub r4,r27,r26 /* source addr */
+ /* current address of _start */
+ /* i.e. where we are running */
+ /* the source addr */
+
+ LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
+ sub r5,r5,r27
+
+ li r6,0x100 /* Start offset, the first 0x100 */
+ /* bytes were copied earlier. */
+
+ bl .copy_and_flush /* copy the first n bytes */
+ /* this includes the code being */
+ /* executed here. */
+
+ /* 4f@l is the low 16 bits of label 4's link address, i.e. its
+ * physical address once the kernel sits at 0 */
+ li r0,4f@l /* Jump to the copy of this code */
+ mtctr r0 /* that we just made */
+ bctr
+
+ /* Skip over the RTAS region (it must not be overwritten),
+ * then copy the remainder of the kernel up to klimit. */
+4: LOADADDR(r9,rtas)
+ sub r9,r9,r26
+ ld r5,RTASBASE(r9) /* get the value of rtas->base */
+ ld r9,RTASSIZE(r9) /* get the value of rtas->size */
+ bl .copy_and_flush /* copy upto rtas->base */
+ add r6,r6,r9 /* then skip over rtas->size bytes */
+
+ LOADADDR(r5,klimit)
+ sub r5,r5,r26
+ ld r5,0(r5) /* get the value of klimit */
+ sub r5,r5,r27
+ bl .copy_and_flush /* copy the rest */
+ b .start_here_pSeries
+
+/*
+ * Copy routine used to copy the kernel to start at physical address 0
+ * and flush and invalidate the caches as needed.
+ * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
+ * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
+ *
+ * Note: this routine *only* clobbers r0, r6 and lr
+ */
+_STATIC(copy_and_flush)
+ addi r5,r5,-8 /* pre-bias limit/offset by one */
+ addi r6,r6,-8 /* doubleword for the loop below */
+4: li r0,16 /* Use the least common */
+ /* denominator cache line */
+ /* size. This results in */
+ /* extra cache line flushes */
+ /* but operation is correct. */
+ /* Can't get cache line size */
+ /* from NACA as it is being */
+ /* moved too. */
+
+ /* 16 doublewords = 128 bytes per dcbst/icbi pass */
+ mtctr r0 /* put # words/line in ctr */
+3: addi r6,r6,8 /* copy a cache line */
+ ldx r0,r6,r4
+ stdx r0,r6,r3
+ bdnz 3b
+ dcbst r6,r3 /* write it to memory */
+ sync
+ icbi r6,r3 /* flush the icache line */
+ cmpld 0,r6,r5
+ blt 4b
+ sync
+ addi r5,r5,8 /* undo the bias so r5 is unchanged */
+ addi r6,r6,8 /* and r6 ends up >= r5 */
+ blr
+
+.align 8
+copy_to_here:
+
+/*
+ * Disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ * On SMP we know the fpu is free, since we give it up every
+ * switch. -- Cort
+ */
+_STATIC(load_up_fpu)
+ mfmsr r5 /* grab the current MSR */
+ ori r5,r5,MSR_FP
+ mtmsrd r5 /* enable use of fpu now */
+ isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ *
+ */
+#ifndef CONFIG_SMP
+ LOADBASE(r3,last_task_used_math)
+ ld r4,last_task_used_math@l(r3)
+ cmpi 0,r4,0
+ beq 1f /* nobody owned the FPU - nothing to save */
+ addi r4,r4,THREAD /* want THREAD of last_task_used_math */
+ SAVE_32FPRS(0, r4)
+ mffs fr0
+ stfd fr0,THREAD_FPSCR-4(r4)
+ ld r5,PT_REGS(r4)
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ li r20,MSR_FP|MSR_FE0|MSR_FE1
+ andc r4,r4,r20 /* disable FP for previous task */
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+ /* enable use of FP after return */
+ /* (r23 is presumably the saved MSR image restored by
+ * fast_exception_return - TODO confirm against the exit path) */
+ ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1
+ addi r5,r13,THREAD /* Get THREAD */
+ lfd fr0,THREAD_FPSCR-4(r5)
+ mtfsf 0xff,fr0
+ REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+ subi r4,r5,THREAD /* Back to 'current' */
+ std r4,last_task_used_math@l(r3)
+#endif /* CONFIG_SMP */
+ /* restore registers and return */
+ b fast_exception_return
+
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+_GLOBAL(KernelFP)
+ ld r3,_MSR(r1)
+ ori r3,r3,MSR_FP
+ std r3,_MSR(r1) /* enable use of FP after return */
+ /* Set up printk-style args: r3 = format string, r4 = current,
+ * r5 = interrupted NIP; the message itself is presumably emitted
+ * on the ret_from_except path - TODO confirm */
+ LOADADDR(r3,86f)
+ mfspr r4,SPRG3 /* Get PACA */
+ ld r4,PACACURRENT(r4) /* current */
+ ld r5,_NIP(r1)
+ b .ret_from_except
+86: .string "floating point used in kernel (task=%p, pc=%x)\n"
+ .align 4
+
+/*
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ */
+_GLOBAL(giveup_fpu)
+ mfmsr r5
+ ori r5,r5,MSR_FP
+ mtmsrd r5 /* enable use of fpu now */
+ isync
+ cmpi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ ld r5,PT_REGS(r3)
+ cmpi 0,r5,0 /* CR0 tested by the beq below; the FPR */
+ SAVE_32FPRS(0, r3) /* save happens regardless of PT_REGS */
+ mffs fr0
+ stfd fr0,THREAD_FPSCR-4(r3)
+ beq 1f /* skip MSR update if PT_REGS is NULL */
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ li r3,MSR_FP|MSR_FE0|MSR_FE1
+ andc r4,r4,r3 /* disable FP for previous task */
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ LOADBASE(r4,last_task_used_math)
+ std r5,last_task_used_math@l(r4) /* no task owns the FPU now */
+#endif /* CONFIG_SMP */
+ blr
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called after the master CPU has released the
+ * secondary processors. The execution environment is relocation off.
+ * The Paca for this processor has the following fields initialized at
+ * this point:
+ * 1. Processor number
+ * 2. Segment table pointer (virtual address)
+ * On entry the following are set:
+ * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
+ * r24 = cpu# (in Linux terms)
+ * r25 = Paca virtual address
+ * SPRG3 = Paca virtual address
+ */
+_GLOBAL(__secondary_start)
+
+ HMT_MEDIUM /* Set thread priority to MEDIUM */
+
+ /* set up the TOC (virtual address): __toc_start + 0x8000 */
+ LOADADDR(r2,__toc_start)
+ addi r2,r2,0x4000
+ addi r2,r2,0x4000
+
+ std r2,PACATOC(r25)
+ li r6,0
+ std r6,PACAKSAVE(r25)
+ stb r6,PACAPROCENABLED(r25) /* start soft-disabled */
+
+#ifndef CONFIG_PPC_ISERIES
+ /* Initialize the page table pointer register. */
+ LOADADDR(r6,_SDR1)
+ ld r6,0(r6) /* get the value of _SDR1 */
+ mtspr SDR1,r6 /* set the htab location */
+#endif
+ /* Initialize the first segment table (or SLB) entry */
+ ld r3,PACASTABVIRT(r25) /* get addr of segment table */
+ bl .stab_initialize
+
+ /* load current into r13 */
+ ld r13,PACACURRENT(r25)
+
+ /* Initialize the kernel stack. Just a repeat for iSeries. */
+ LOADADDR(r3,current_set)
+ sldi r28,r24,3 /* get current_set[cpu#] */
+ ldx r1,r3,r28
+ addi r1,r1,THREAD_SIZE
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
+ ld r3,PACASTABREAL(r25) /* get raddr of segment table */
+ ori r4,r3,1 /* turn on valid bit */
+
+#ifdef CONFIG_PPC_ISERIES
+ /* The hypervisor owns the ASR on iSeries; ask it to set it */
+ li r0,-1 /* hypervisor call */
+ li r3,1
+ sldi r3,r3,63 /* 0x8000000000000000 */
+ ori r3,r3,4 /* 0x8000000000000004 */
+ sc /* HvCall_setASR */
+#else
+ mtasr r4 /* set the stab location */
+#endif
+ li r7,0
+ mtlr r7 /* clear LR - we never return */
+
+ /* enable MMU and jump to start_secondary */
+ LOADADDR(r3,.start_secondary_prolog)
+ SET_REG_TO_CONST(r4, MSR_KERNEL)
+#ifdef DO_SOFT_DISABLE
+ ori r4,r4,MSR_EE
+#endif
+ mtspr SRR0,r3
+ mtspr SRR1,r4
+ rfid
+#endif /* CONFIG_SMP */
+
+/*
+ * Running with relocation on at this point. All we want to do is
+ * zero the stack back-chain pointer before going into C code.
+ */
+_GLOBAL(start_secondary_prolog)
+ li r3,0
+ std r3,0(r1) /* Zero the stack frame pointer */
+ bl .start_secondary /* not expected to return */
+
+/*
+ * Set MSR_SF (64-bit mode) and MSR_ISF in the MSR; the bit masks are
+ * built with rldicr because they sit above the 32-bit immediate range.
+ * This subroutine clobbers r11, r12 and the LR
+ */
+_GLOBAL(enable_64b_mode)
+ mfmsr r11 /* grab the current MSR */
+ li r12,1
+ rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) /* r12 = MSR_SF */
+ or r11,r11,r12
+ li r12,1
+ rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) /* r12 = MSR_ISF */
+ or r11,r11,r12
+ mtmsrd r11
+ isync
+ blr
+
+/*
+ * Clear MSR_SF and MSR_ISF - the exact inverse of enable_64b_mode.
+ * This subroutine clobbers r11, r12 and the LR
+ */
+_GLOBAL(enable_32b_mode)
+ mfmsr r11 /* grab the current MSR */
+ li r12,1
+ rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) /* r12 = MSR_SF */
+ andc r11,r11,r12
+ li r12,1
+ rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) /* r12 = MSR_ISF */
+ andc r11,r11,r12
+ mtmsrd r11
+ isync
+ blr
+
+/*
+ * This is where the main kernel code starts.
+ * Still relocation-off: set up the naca, release secondaries,
+ * build the stab and htab in real mode, then rfid into
+ * start_here_common with translation on.
+ */
+_STATIC(start_here_pSeries)
+ /* get a new offset, now that the kernel has moved. */
+ bl .reloc_offset
+ mr r26,r3
+
+ /* setup the naca pointer which is needed by *tab_initialize */
+ LOADADDR(r6,naca)
+ sub r6,r6,r26 /* addr of the variable naca */
+ li r27,0x4000 /* naca sits at physical 0x4000 */
+ std r27,0(r6) /* set the value of naca */
+
+#ifdef CONFIG_HMT
+ /* Start up the second thread on cpu 0 */
+ mfspr r3,PVR
+ srwi r3,r3,16 /* PVR version field */
+ cmpwi r3,0x34 /* Pulsar */
+ beq 90f
+ cmpwi r3,0x36 /* Icestar */
+ beq 90f
+ cmpwi r3,0x37 /* SStar */
+ beq 90f
+ b 91f /* HMT not supported */
+90: li r3,0
+ bl .hmt_start_secondary
+91:
+#endif
+
+#ifdef CONFIG_SMP
+ /* All secondary cpus are now spinning on a common
+ * spinloop, release them all now so they can start
+ * to spin on their individual Paca spinloops.
+ * For non SMP kernels, the secondary cpus never
+ * get out of the common spinloop.
+ */
+ li r3,1
+ LOADADDR(r5,__secondary_hold_spinloop)
+ tophys(r4,r5)
+ std r3,0(r4)
+#endif
+
+ /* The following gets the stack and TOC set up with the regs */
+ /* pointing to the real addr of the kernel stack. This is */
+ /* all done to support the C function call below which sets */
+ /* up the htab. This is done because we have relocated the */
+ /* kernel but are still running in real mode. */
+
+ LOADADDR(r3,init_thread_union)
+ sub r3,r3,r26
+
+ /* set up a stack pointer (physical address) */
+ addi r1,r3,THREAD_SIZE
+ li r0,0
+ stdu r0,-STACK_FRAME_OVERHEAD(r1)
+
+ /* set up the TOC (physical address) */
+ LOADADDR(r2,__toc_start)
+ addi r2,r2,0x4000
+ addi r2,r2,0x4000
+ sub r2,r2,r26
+
+ /* Init naca->debug_switch so it can be used in stab & htab init. */
+ bl .ppcdbg_initialize
+
+ /* Get the pointer to the segment table which is used by */
+ /* stab_initialize */
+ li r27,0x4000
+ ld r6,PACA(r27) /* Get the base Paca pointer */
+ sub r6,r6,r26 /* convert to physical addr */
+ mtspr SPRG3,r6 /* PPPBBB: Temp... -Peter */
+ ld r3,PACASTABREAL(r6)
+ ori r4,r3,1 /* turn on valid bit */
+ mtasr r4 /* set the stab location */
+
+ /* Initialize an initial memory mapping and turn on relocation. */
+ bl .stab_initialize
+ bl .htab_initialize
+
+ LOADADDR(r6,_SDR1)
+ sub r6,r6,r26
+ ld r6,0(r6) /* get the value of _SDR1 */
+ mtspr SDR1,r6 /* set the htab location */
+
+ /* rfid with translation on into the common startup code */
+ LOADADDR(r3,.start_here_common)
+ SET_REG_TO_CONST(r4, MSR_KERNEL)
+ mtspr SRR0,r3
+ mtspr SRR1,r4
+ rfid
+
+ /* This is where all platforms converge execution */
+_STATIC(start_here_common)
+ /* relocation is on at this point */
+
+ /* Clear out the BSS */
+ LOADADDR(r11,_end)
+
+ LOADADDR(r8,__bss_start)
+
+ sub r11,r11,r8 /* bss size */
+ addi r11,r11,7 /* round up to an even double word */
+ rldicl. r11,r11,61,3 /* shift right by 3 */
+ beq 4f /* empty bss - skip the clear loop */
+ addi r8,r8,-8
+ li r0,0
+ mtctr r11 /* zero this many doublewords */
+3: stdu r0,8(r8)
+ bdnz 3b
+4:
+
+ /* The following code sets up the SP and TOC now that we are */
+ /* running with translation enabled. */
+
+ LOADADDR(r3,init_thread_union)
+
+ /* set up the stack */
+ addi r1,r3,THREAD_SIZE
+ li r0,0
+ stdu r0,-STACK_FRAME_OVERHEAD(r1)
+
+ /* set up the TOC */
+ LOADADDR(r2,__toc_start)
+ addi r2,r2,0x4000
+ addi r2,r2,0x4000
+
+ /* setup the naca pointer */
+ LOADADDR(r9,naca)
+
+ SET_REG_TO_CONST(r8, KERNELBASE)
+ addi r8,r8,0x4000
+ std r8,0(r9) /* set the value of the naca ptr */
+
+ LOADADDR(r4,naca) /* Get Naca ptr address */
+ ld r4,0(r4) /* Get the location of the naca */
+ ld r4,PACA(r4) /* Get the base Paca pointer */
+ mtspr SPRG3,r4
+
+ /* ptr to current */
+ LOADADDR(r13,init_task)
+ std r13,PACACURRENT(r4)
+
+ std r2,PACATOC(r4)
+ li r5,0
+ /* NOTE(review): r5 is loaded with 0 above but r0 (still zero
+ * from the stdu setup) is what actually gets stored; the net
+ * effect (PACAKSAVE = 0) is the same either way. */
+ std r0,PACAKSAVE(r4)
+
+ /* ptr to hardware interrupt stack for processor 0 */
+ LOADADDR(r3, hardware_int_paca0)
+ li r5,0x1000
+ sldi r5,r5,3 /* 0x1000 * 8 = 32KB region */
+ subi r5,r5,STACK_FRAME_OVERHEAD
+
+ add r3,r3,r5 /* stack grows down from the top */
+ std r3,PACAHRDWINTSTACK(r4)
+
+ li r3,0
+ stb r3,PACAHRDWINTCOUNT(r4)
+
+ /* Restore the parms passed in from the bootloader. */
+ mr r3,r31
+ mr r4,r30
+ mr r5,r29
+ mr r6,r28
+ mr r7,r27
+
+ bl .setup_system
+
+ /* Load up the kernel context */
+5:
+#ifdef DO_SOFT_DISABLE
+ mfspr r4,SPRG3
+ li r5,0
+ stb r5,PACAPROCENABLED(r4) /* Soft Disabled */
+ mfmsr r5
+ ori r5,r5,MSR_EE /* Hard Enabled */
+ mtmsrd r5
+#endif
+
+ bl .start_kernel
+
+/*
+ * Record this thread's physical id in hmt_thread_data and kick off
+ * the second hardware thread, then fall into the secondary spin code.
+ * hmt_thread_data is a table of (hw thread id, linux cpu#) word
+ * pairs, indexed by r24*8 (stwx at +0, lwzx at +4 below).
+ */
+_GLOBAL(hmt_init)
+#ifdef CONFIG_HMT
+ LOADADDR(r5, hmt_thread_data)
+ mfspr r7,PVR
+ srwi r7,r7,16 /* PVR version field */
+ cmpwi r7,0x34 /* Pulsar */
+ beq 90f
+ cmpwi r7,0x36 /* Icestar */
+ beq 91f
+ cmpwi r7,0x37 /* SStar */
+ beq 91f
+ b 101f /* HMT not supported */
+90: mfspr r6,PIR
+ andi. r6,r6,0x1f /* Pulsar: 5-bit processor id */
+ b 92f
+91: mfspr r6,PIR
+ andi. r6,r6,0x3ff /* Icestar/SStar: 10-bit id */
+92: sldi r4,r24,3
+ stwx r6,r5,r4
+ bl .hmt_start_secondary
+ b 101f
+
+__hmt_secondary_hold:
+ LOADADDR(r5, hmt_thread_data)
+ clrldi r5,r5,4 /* strip the region bits -> real addr */
+ li r7,0
+ mfspr r6,PIR
+ mfspr r8,PVR
+ srwi r8,r8,16
+ cmpwi r8,0x34
+ bne 93f
+ andi. r6,r6,0x1f
+ b 103f
+ /* NOTE(review): mask here is 0x3f but the hmt_init path above
+ * uses 0x3ff for the same non-Pulsar case - possible bug */
+93: andi. r6,r6,0x3f
+
+ /* scan the table for our hw thread id */
+103: lwzx r8,r5,r7
+ cmpw r8,r6
+ beq 104f
+ addi r7,r7,8
+ b 103b
+
+ /* found: pick up the linux cpu# from the second word */
+104: addi r7,r7,4
+ lwzx r9,r5,r7
+ mr r24,r9
+101:
+#endif
+ mr r3,r24
+ b .pseries_secondary_smp_init
+
+#ifdef CONFIG_HMT
+/*
+ * Wake the second hardware thread and point it at
+ * __hmt_secondary_hold. NIADORM/MSRDORM/TSC/TST/CTRLF/CTRLT are
+ * HMT-specific SPRs; the magic constants below are taken as-is and
+ * are not documented here - do not change without hardware docs.
+ */
+_GLOBAL(hmt_start_secondary)
+ LOADADDR(r4,__hmt_secondary_hold)
+ clrldi r4,r4,4 /* real address of the hold loop */
+ mtspr NIADORM, r4
+ mfspr r4, MSRDORM
+ li r5, -65 /* -65 = ~64: clear one MSRDORM bit */
+ and r4, r4, r5
+ mtspr MSRDORM, r4
+ lis r4,0xffef
+ ori r4,r4,0x7403
+ mtspr TSC, r4
+ li r4,0x1f4
+ mtspr TST, r4
+ mfspr r4, HID0
+ ori r4, r4, 0x1
+ mtspr HID0, r4
+ mfspr r4, CTRLF
+ oris r4, r4, 0x40
+ mtspr CTRLT, r4
+ blr
+#endif
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the data segment,
+ * which is page-aligned.
+ */
+ .data
+ .align 12 /* 2^12 = 4096-byte alignment */
+ .globl sdata
+sdata:
+ .globl empty_zero_page
+empty_zero_page: /* one page of zeros */
+ .space 4096
+
+ .globl swapper_pg_dir
+swapper_pg_dir: /* kernel page directory */
+ .space 4096
+
+ .globl ioremap_dir
+ioremap_dir: /* page directory for ioremap mappings */
+ .space 4096
+
+ .globl hardware_int_paca0
+hardware_int_paca0: /* 32KB hardware interrupt stack, cpu 0 */
+ .space 8*4096
+
+/* 1 page segment table per cpu (max 48) */
+ .globl stab_array
+stab_array:
+ .space 4096 * 48
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * Used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+ .globl cmd_line
+cmd_line:
+ .space 512
--- /dev/null
+/*
+ *
+ *
+ * PowerPC64 port by Mike Corrigan and Dave Engebretsen
+ * {mikejc|engebret}@us.ibm.com
+ *
+ * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
+ *
+ * Module name: htab.c
+ *
+ * Description:
+ * PowerPC Hashed Page Table functions
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/init.h>
+#include <asm/system.h>
+#include <asm/iSeries/LparData.h>
+#include <linux/spinlock.h>
+#include <asm/ppcdebug.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/sysctl.h>
+#include <linux/ctype.h>
+#include <linux/cache.h>
+#include <asm/uaccess.h>
+#include <asm/Naca.h>
+#include <asm/system.h>
+#include <asm/pmc.h>
+#include <asm/machdep.h>
+#include <asm/lmb.h>
+#ifdef CONFIG_PPC_EEH
+#include <asm/eeh.h>
+#endif
+
+/* For iSeries */
+#include <asm/iSeries/HvCallHpt.h>
+
+/* Note: pte --> Linux PTE
+ * HPTE --> PowerPC Hashed Page Table Entry
+ */
+
+/* Global hash table descriptor; populated by htab_initialize() */
+HTAB htab_data = {NULL, 0, 0, 0, 0};
+
+int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
+ void *buffer, size_t *lenp);
+
+void htab_initialize(void);
+void make_pte_LPAR(HPTE *htab,
+ unsigned long va, unsigned long pa, int mode,
+ unsigned long hash_mask, int large);
+
+extern unsigned long reloc_offset(void);
+extern unsigned long get_kernel_vsid( unsigned long ea );
+extern void cacheable_memzero( void *, unsigned int );
+
+extern unsigned long _SDR1;
+extern unsigned long klimit;
+extern struct Naca *naca;
+
+extern unsigned long _ASR;
+extern inline void make_ste(unsigned long stab,
+ unsigned long esid, unsigned long vsid);
+
+extern char _stext[], _etext[], __start_naca[], __end_stab[];
+
+/* Serializes updates to the hash table (see build_valid_hpte) */
+static spinlock_t hash_table_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+
+/* Adjust a data address for use before relocation is final:
+ * 'offset' is the load-address bias returned by reloc_offset() */
+#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
+#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
+#define RELOC(x) (*PTRRELOC(&(x)))
+
+extern unsigned long htab_size( unsigned long );
+unsigned long hpte_getword0_iSeries( unsigned long slot );
+
+#define KB (1024)
+#define MB (1024*KB)
+/*
+ * Map the virtual range [start, end) into the hash table with the
+ * given protection bits. Runs during early boot before relocation is
+ * final: reloc_offset()/PTRRELOC adjust data references and __v2a
+ * converts addresses for the hardware table. 'large' selects a 16MB
+ * page step instead of 4KB.
+ */
+static inline void
+create_pte_mapping(unsigned long start, unsigned long end,
+ unsigned long mode, unsigned long mask, int large)
+{
+ unsigned long addr, offset = reloc_offset();
+ HTAB *_htab_data = PTRRELOC(&htab_data);
+ HPTE *htab = (HPTE *)__v2a(_htab_data->htab);
+ unsigned int step;
+
+ if (large)
+ step = 16*MB;
+ else
+ step = 4*KB;
+
+ for (addr = start; addr < end; addr += step) {
+ unsigned long vsid = get_kernel_vsid(addr);
+ /* va = segment VSID | 28-bit segment offset */
+ unsigned long va = (vsid << 28) | (addr & 0xfffffff);
+ make_pte(htab, va, (unsigned long)__v2a(addr), mode, mask,
+ large);
+ }
+}
+
+/*
+ * Size and allocate the hashed page table and create the bolted
+ * kernel mappings. Called in real mode early in boot, so all global
+ * data must be accessed through RELOC/PTRRELOC.
+ */
+void
+htab_initialize(void)
+{
+ unsigned long table, htab_size_bytes;
+ unsigned long pteg_count;
+ unsigned long mode_ro, mode_rw, mask;
+ unsigned long offset = reloc_offset();
+ struct Naca *_naca = RELOC(naca);
+ HTAB *_htab_data = PTRRELOC(&htab_data);
+
+ /*
+ * Calculate the required size of the htab. We want the number of
+ * PTEGs to equal one half the number of real pages.
+ */
+ htab_size_bytes = 1UL << _naca->pftSize;
+ pteg_count = htab_size_bytes >> 7; /* 128 bytes per PTEG */
+
+ /* For debug, make the HTAB 1/8 as big as it normally would be. */
+ ifppcdebug(PPCDBG_HTABSIZE) {
+ pteg_count >>= 3;
+ htab_size_bytes = pteg_count << 7;
+ }
+
+ _htab_data->htab_num_ptegs = pteg_count;
+ _htab_data->htab_hash_mask = pteg_count - 1;
+
+ if(_machine == _MACH_pSeries) {
+ /* Find storage for the HPT. Must be contiguous in
+ * the absolute address space.
+ */
+ table = lmb_alloc(htab_size_bytes, htab_size_bytes);
+ if ( !table )
+ panic("ERROR, cannot find space for HPTE\n");
+ _htab_data->htab = (HPTE *)__a2v(table);
+
+ /* htab absolute addr + encoded htabsize */
+ RELOC(_SDR1) = table + __ilog2(pteg_count) - 11;
+
+ /* Initialize the HPT with no entries */
+ cacheable_memzero((void *)table, htab_size_bytes);
+ } else {
+ /* iSeries and LPAR: hypervisor owns the real table */
+ _htab_data->htab = NULL;
+ RELOC(_SDR1) = 0;
+ }
+
+ mode_ro = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RXRX;
+ mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
+ mask = pteg_count-1;
+
+ /* Create PTE's for the kernel text and data sections plus
+ * the HPT and HPTX arrays. Make the assumption that
+ * (addr & KERNELBASE) == 0 (ie they are disjoint).
+ * We also assume that the va is <= 64 bits.
+ */
+#if 0
+ create_pte_mapping((unsigned long)_stext, (unsigned long)__start_naca, mode_ro, mask);
+ create_pte_mapping((unsigned long)__start_naca, (unsigned long)__end_stab, mode_rw, mask);
+ create_pte_mapping((unsigned long)__end_stab, (unsigned long)_etext, mode_ro, mask);
+ create_pte_mapping((unsigned long)_etext, RELOC(klimit), mode_rw, mask);
+ create_pte_mapping((unsigned long)__a2v(table), (unsigned long)__a2v(table+htab_size_bytes), mode_rw, mask);
+#else
+#ifndef CONFIG_PPC_ISERIES
+ /* On POWER4 with >256MB, map the first 256MB with 4K pages and
+ * the remainder with 16MB pages. */
+ if (__is_processor(PV_POWER4) && _naca->physicalMemorySize > 256*MB) {
+ create_pte_mapping((unsigned long)KERNELBASE,
+ KERNELBASE + 256*MB, mode_rw, mask, 0);
+ create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
+ KERNELBASE + (_naca->physicalMemorySize),
+ mode_rw, mask, 1);
+ return;
+ }
+#endif
+ create_pte_mapping((unsigned long)KERNELBASE,
+ KERNELBASE+(_naca->physicalMemorySize),
+ mode_rw, mask, 0);
+#endif
+}
+#undef KB
+#undef MB
+
+/*
+ * Create a pte. Used during initialization only.
+ * We assume the PTE will fit in the primary PTEG.
+ */
+void make_pte(HPTE *htab,
+ unsigned long va, unsigned long pa, int mode,
+ unsigned long hash_mask, int large)
+{
+ HPTE *hptep;
+ unsigned long hash, i;
+ volatile unsigned long x = 1; /* for the deliberate hang below */
+ unsigned long vpn;
+
+#ifdef CONFIG_PPC_PSERIES
+ /* LPAR: the hypervisor owns the table; delegate */
+ if(_machine == _MACH_pSeriesLP) {
+ make_pte_LPAR(htab, va, pa, mode, hash_mask, large);
+ return;
+ }
+#endif
+
+ /* vpn: va minus the page offset (24-bit for 16MB, 12-bit for 4K) */
+ if (large)
+ vpn = va >> 24;
+ else
+ vpn = va >> 12;
+
+ hash = hpt_hash(vpn, large);
+
+ hptep = htab + ((hash & hash_mask)*HPTES_PER_GROUP);
+
+ /* take the first free slot in the primary group only */
+ for (i = 0; i < 8; ++i, ++hptep) {
+ if ( hptep->dw0.dw0.v == 0 ) { /* !valid */
+ hptep->dw1.dword1 = pa | mode;
+ hptep->dw0.dword0 = 0;
+ hptep->dw0.dw0.avpn = va >> 23;
+ hptep->dw0.dw0.bolted = 1; /* bolted */
+ hptep->dw0.dw0.v = 1; /* make valid */
+ return;
+ }
+ }
+
+ /* We should _never_ get here and too early to call xmon. */
+ for(;x;x|=1); /* deliberate infinite spin */
+}
+
+/* Functions to invalidate a HPTE */
+static void hpte_invalidate_iSeries( unsigned long slot )
+{
+ /* The hypervisor owns the iSeries HPT; it invalidates the entry
+ * and flushes the TLB for us. */
+ HvCallHpt_invalidateSetSwBitsGet( slot, 0, 0 );
+}
+
+/* Invalidate an HPTE directly in the hash table and flush the
+ * corresponding TLB entry. The first doubleword is snapshotted
+ * before clearing so the EA can be reconstructed for tlbie. */
+static void hpte_invalidate_pSeries( unsigned long slot )
+{
+ /* Local copy of the first doubleword of the HPTE */
+ union {
+ unsigned long d;
+ Hpte_dword0 h;
+ } hpte_dw0;
+
+ /* Locate the HPTE */
+ HPTE * hptep = htab_data.htab + slot;
+
+ /* Get the first doubleword of the HPTE */
+ hpte_dw0.d = hptep->dw0.dword0;
+
+ /* Invalidate the hpte */
+ hptep->dw0.dword0 = 0;
+
+ /* Invalidate the tlb */
+ {
+ unsigned long vsid, group, pi, pi_high;
+
+ /* Rebuild the page index from the AVPN and the hash group
+ * the slot belongs to (complemented for secondary-hash
+ * entries) - inverse of the hpt_hash computation; the low
+ * 11 bits come from vsid^group, the rest from the AVPN. */
+ vsid = hpte_dw0.h.avpn >> 5;
+ group = slot >> 3;
+ if(hpte_dw0.h.h) {
+ group = ~group;
+ }
+ pi = (vsid ^ group) & 0x7ff;
+ pi_high = (hpte_dw0.h.avpn & 0x1f) << 11;
+ pi |= pi_high;
+ _tlbie(pi << 12);
+ }
+}
+
+
+/* Select an available HPT slot for a new HPTE
+ * return slot index (if in primary group)
+ * return -slot index (if in secondary group)
+ */
+static long hpte_selectslot_iSeries( unsigned long vpn )
+{
+ HPTE hpte;
+ long ret_slot, orig_slot;
+ unsigned long primary_hash;
+ unsigned long hpteg_slot;
+ unsigned long slot;
+ unsigned i, k;
+ union {
+ unsigned long d;
+ Hpte_dword0 h;
+ } hpte_dw0;
+
+ ret_slot = orig_slot = HvCallHpt_findValid( &hpte, vpn );
+ if ( hpte.dw0.dw0.v ) { /* If valid ...what do we do now? */
+ udbg_printf( "hpte_selectslot_iSeries: vpn 0x%016lx already valid at slot 0x%016lx\n", vpn, ret_slot );
+ udbg_printf( "hpte_selectslot_iSeries: returned hpte 0x%016lx 0x%016lx\n", hpte.dw0.dword0, hpte.dw1.dword1 );
+
+ return (0x8000000000000000);
+ /* panic("select_hpte_slot found entry already valid\n"); */
+ }
+ if ( ret_slot == -1 ) { /* -1 indicates no available slots */
+
+ /* No available entry found in secondary group */
+
+ PMC_SW_SYSTEM(htab_capacity_castouts);
+
+ primary_hash = hpt_hash(vpn, 0);
+ hpteg_slot = ( primary_hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+ k = htab_data.next_round_robin++ & 0x7;
+
+ for ( i=0; i<HPTES_PER_GROUP; ++i ) {
+ if ( k == HPTES_PER_GROUP )
+ k = 0;
+ slot = hpteg_slot + k;
+ hpte_dw0.d = hpte_getword0_iSeries( slot );
+ if ( !hpte_dw0.h.bolted ) {
+ hpte_invalidate_iSeries( slot );
+ ret_slot = slot;
+ }
+ ++k;
+ }
+ } else {
+ if ( ret_slot < 0 ) {
+ PMC_SW_SYSTEM(htab_primary_overflows);
+ ret_slot &= 0x7fffffffffffffff;
+ ret_slot = -ret_slot;
+ }
+ }
+ if ( ret_slot == -1 ) {
+ /* No non-bolted entry found in primary group - time to panic */
+ udbg_printf("hpte_selectslot_pSeries - No non-bolted HPTE in group 0x%lx! \n", hpteg_slot/HPTES_PER_GROUP);
+ panic("No non-bolted HPTE in group %lx", (unsigned long)hpteg_slot/HPTES_PER_GROUP);
+ }
+ PPCDBG(PPCDBG_MM, "hpte_selectslot_iSeries: vpn=0x%016lx, orig_slot=0x%016lx, ret_slot=0x%016lx \n",
+ vpn, orig_slot, ret_slot );
+ return ret_slot;
+}
+
+/* Pick a hash-table slot for a new HPTE covering 'vpn'.
+ * Returns the slot index for a primary-group slot, the negated slot
+ * index for a secondary-group slot. If both groups are full, a
+ * non-bolted primary entry is chosen round-robin, invalidated and
+ * its slot returned; panics if every primary entry is bolted.
+ */
+static long hpte_selectslot_pSeries(unsigned long vpn)
+{
+ unsigned long hash = hpt_hash(vpn, 0);
+ unsigned long group;
+ HPTE * base;
+ unsigned idx, victim;
+
+ /* First choice: a free slot in the primary group */
+ group = ( hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+ base = htab_data.htab + group;
+ for (idx = 0; idx < HPTES_PER_GROUP; ++idx)
+ if ( base[idx].dw0.dw0.v == 0 )
+ return group + idx;
+
+ PMC_SW_SYSTEM(htab_primary_overflows);
+
+ /* Second choice: a free slot in the secondary group */
+ group = ( ~hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+ base = htab_data.htab + group;
+ for (idx = 0; idx < HPTES_PER_GROUP; ++idx)
+ if ( base[idx].dw0.dw0.v == 0 )
+ return -(group + idx);
+
+ PMC_SW_SYSTEM(htab_capacity_castouts);
+
+ /* Both groups full: cast out a non-bolted primary entry,
+ * starting from the round-robin cursor. */
+ group = ( hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+ base = htab_data.htab + group;
+ victim = htab_data.next_round_robin++ & 0x7;
+
+ for (idx = 0; idx < HPTES_PER_GROUP; ++idx) {
+ if (victim == HPTES_PER_GROUP)
+ victim = 0;
+ if (!base[victim].dw0.dw0.bolted) {
+ group += victim;
+ /* Invalidate the current entry */
+ ppc_md.hpte_invalidate(group);
+ return group;
+ }
+ ++victim;
+ }
+
+ /* No non-bolted entry found in primary group - time to panic */
+ udbg_printf("hpte_selectslot_pSeries - No non-bolted HPTE in group 0x%lx! \n", group/HPTES_PER_GROUP);
+ /* xmon(0); */
+ panic("No non-bolted HPTE in group %lx", (unsigned long)group/HPTES_PER_GROUP);
+
+ /* keep the compiler happy */
+ return 0;
+}
+
+/* Fetch the first doubleword of the HPTE at 'slot'. The iSeries
+ * hash table lives behind the hypervisor, so a local copy of the
+ * whole entry is retrieved first. */
+unsigned long hpte_getword0_iSeries( unsigned long slot )
+{
+ HPTE local;
+
+ HvCallHpt_get( &local, slot );
+ return local.dw0.dword0;
+}
+
+/*
+ * Fetch the first doubleword (dword0) of the HPTE in the given slot.
+ * On pSeries the hash table is directly addressable by the kernel.
+ */
+unsigned long hpte_getword0_pSeries( unsigned long slot )
+{
+	return (htab_data.htab + slot)->dw0.dword0;
+}
+
+/*
+ * Locate the HPTE mapping vpn via the hypervisor.
+ * Returns -1 if no valid entry exists; otherwise the slot number,
+ * negated when the entry lives in the secondary hash group.
+ */
+static long hpte_find_iSeries(unsigned long vpn)
+{
+	HPTE hpte;
+	long slot = HvCallHpt_findValid( &hpte, vpn );
+
+	if ( !hpte.dw0.dw0.v )
+		return -1;		/* no valid matching entry */
+
+	if ( slot >= 0 )
+		return slot;		/* primary group hit */
+
+	/* Secondary group hit: strip the hypervisor's flag bit and
+	 * report the slot as a negative number.
+	 */
+	return -( slot & 0x7fffffffffffffff );
+}
+
+/*
+ * Locate the HPTE mapping vpn by probing the primary hash group,
+ * then the secondary group (ones-complement hash).
+ * Returns -1 if not found; otherwise the slot number, negated when
+ * the match was in the secondary group.
+ */
+static long hpte_find_pSeries(unsigned long vpn)
+{
+	union {
+		unsigned long d;
+		Hpte_dword0 h;
+	} hpte_dw0;
+	long slot;
+	unsigned long hash;
+	unsigned long i,j;
+
+	hash = hpt_hash(vpn, 0);
+	/* j == 0: primary group, j == 1: secondary group */
+	for ( j=0; j<2; ++j ) {
+		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
+		for ( i=0; i<HPTES_PER_GROUP; ++i ) {
+			hpte_dw0.d = hpte_getword0_pSeries( slot );
+			/* Match requires the abbreviated vpn, the valid
+			 * bit, and the hash-function bit (which group the
+			 * entry claims to be in) to all agree.
+			 */
+			if ( ( hpte_dw0.h.avpn == ( vpn >> 11 ) ) &&
+			     ( hpte_dw0.h.v ) &&
+			     ( hpte_dw0.h.h == j ) ) {
+				/* HPTE matches */
+				if ( j )
+					slot = -slot;
+				return slot;
+			}
+			++slot;
+		}
+		/* Secondary hash is the complement of the primary hash */
+		hash = ~hash;
+	}
+	return -1;
+}
+
+/* This function is called by iSeries setup when initializing the hpt.
+ * Selects a hash-table slot for (vsid, ea), creates a valid HPTE for
+ * the physical address pa, and (if ptep is non-NULL) records the slot
+ * and secondary-group flag back into the Linux pte.
+ * Panics if an HPTE for this vpn already exists (selectslot returning
+ * 0x8000000000000000 - see hash_page for the same convention).
+ */
+void build_valid_hpte( unsigned long vsid, unsigned long ea, unsigned long pa,
+		       pte_t * ptep, unsigned hpteflags, unsigned bolted )
+{
+	unsigned long vpn, flags;
+	long hpte_slot;
+	unsigned hash;
+	pte_t pte;
+
+	/* vpn = virtual page number: vsid plus page index within segment */
+	vpn = ((vsid << 28) | ( ea & 0xffff000 )) >> 12;
+
+	spin_lock_irqsave( &hash_table_lock, flags );
+
+	hpte_slot = ppc_md.hpte_selectslot( vpn );
+	hash = 0;
+	if ( hpte_slot < 0 ) {
+		if ( hpte_slot == 0x8000000000000000 ) {
+			udbg_printf("hash_page: ptep    = 0x%016lx\n",
+				    (unsigned long)ptep );
+			udbg_printf("hash_page: ea      = 0x%016lx\n", ea );
+			udbg_printf("hash_page: vpn     = 0x%016lx\n", vpn );
+
+			panic("hash_page: hpte already exists\n");
+		}
+		/* Negative slot means the secondary hash group was used */
+		hash = 1;
+		hpte_slot = -hpte_slot;
+	}
+	ppc_md.hpte_create_valid( hpte_slot, vpn, pa >> 12, hash, ptep,
+				  hpteflags, bolted );
+
+	if ( ptep ) {
+		/* Get existing pte flags */
+		pte = *ptep;
+		pte_val(pte) &= ~_PAGE_HPTEFLAGS;
+
+		/* Add in the has hpte flag */
+		pte_val(pte) |= _PAGE_HASHPTE;
+
+		/* Add in the _PAGE_SECONDARY flag */
+		pte_val(pte) |= hash << 15;
+
+		/* Add in the hpte slot */
+		pte_val(pte) |= (hpte_slot << 12) & _PAGE_GROUP_IX;
+
+		/* Save the new pte.  Safe without an atomic update because
+		 * we hold hash_table_lock and this is called at init time.
+		 */
+		*ptep = pte;
+
+	}
+	spin_unlock_irqrestore( &hash_table_lock, flags );
+}
+
+
+/* Create an HPTE and validate it
+ * It is assumed that the HPT slot currently is invalid.
+ * The HPTE is set with the vpn, rpn (converted to absolute)
+ * and flags
+ */
+static void hpte_create_valid_iSeries(unsigned long slot, unsigned long vpn,
+				      unsigned long prpn, unsigned hash,
+				      void * ptep, unsigned hpteflags,
+				      unsigned bolted )
+{
+	/* Local copy of HPTE, built up in memory and then handed to the
+	 * hypervisor in one call - the kernel never touches the real
+	 * hash table directly on iSeries.
+	 */
+	struct {
+		/* Local copy of first doubleword of HPTE */
+		union {
+			unsigned long d;
+			Hpte_dword0 h;
+		} dw0;
+		/* Local copy of second doubleword of HPTE */
+		union {
+			unsigned long d;
+			Hpte_dword1 h;
+			Hpte_dword1_flags f;
+		} dw1;
+	} lhpte;
+
+	/* Abbreviated virtual page number stored in the HPTE */
+	unsigned long avpn = vpn >> 11;
+	/* Convert the physical page number to an absolute one */
+	unsigned long arpn = physRpn_to_absRpn( prpn );
+
+	/* Fill in the local HPTE with absolute rpn, avpn and flags */
+	lhpte.dw1.d = 0;
+	lhpte.dw1.h.rpn = arpn;
+	lhpte.dw1.f.flags = hpteflags;
+
+	lhpte.dw0.d = 0;
+	lhpte.dw0.h.avpn = avpn;
+	lhpte.dw0.h.h = hash;
+	lhpte.dw0.h.bolted = bolted;
+	lhpte.dw0.h.v = 1;
+
+	/* Now fill in the actual HPTE via the hypervisor */
+	HvCallHpt_addValidate( slot, hash, (HPTE *)&lhpte );
+}
+
+static void hpte_create_valid_pSeries(unsigned long slot, unsigned long vpn,
+				      unsigned long prpn, unsigned hash,
+				      void * ptep, unsigned hpteflags,
+				      unsigned bolted)
+{
+	/* Local copy of HPTE, built up fully in memory so the live entry
+	 * can be written in a controlled order (dword1 first, then dword0
+	 * with the valid bit - see below).
+	 */
+	struct {
+		/* Local copy of first doubleword of HPTE */
+		union {
+			unsigned long d;
+			Hpte_dword0 h;
+		} dw0;
+		/* Local copy of second doubleword of HPTE */
+		union {
+			unsigned long d;
+			Hpte_dword1 h;
+			Hpte_dword1_flags f;
+		} dw1;
+	} lhpte;
+
+	/* Abbreviated virtual page number stored in the HPTE */
+	unsigned long avpn = vpn >> 11;
+	/* Convert the physical page number to an absolute one */
+	unsigned long arpn = physRpn_to_absRpn( prpn );
+
+	HPTE *hptep;
+
+	/* Fill in the local HPTE with absolute rpn, avpn and flags */
+	lhpte.dw1.d = 0;
+	lhpte.dw1.h.rpn = arpn;
+	lhpte.dw1.f.flags = hpteflags;
+
+	lhpte.dw0.d = 0;
+	lhpte.dw0.h.avpn = avpn;
+	lhpte.dw0.h.h = hash;
+	lhpte.dw0.h.bolted = bolted;
+	lhpte.dw0.h.v = 1;
+
+	/* Now fill in the actual HPTE */
+	hptep = htab_data.htab + slot;
+
+	/* Set the second dword first so that the valid bit
+	 * is the last thing set
+	 */
+
+	hptep->dw1.dword1 = lhpte.dw1.d;
+
+	/* Guarantee the second dword is visible before
+	 * the valid bit
+	 */
+
+	__asm__ __volatile__ ("eieio" : : : "memory");
+
+	/* Now set the first dword including the valid bit */
+	hptep->dw0.dword0 = lhpte.dw0.d;
+
+	/* ptesync orders the HPTE update with respect to subsequent
+	 * hardware page-table searches
+	 */
+	__asm__ __volatile__ ("ptesync" : : : "memory");
+}
+
+/* find_linux_pte returns the address of a linux pte for a given
+ * effective address and directory. If not found, it returns zero.
+ */
+
+/* find_linux_pte returns the address of a linux pte for a given
+ * effective address and directory. If not found, it returns zero.
+ */
+
+pte_t * find_linux_pte( pgd_t * pgdir, unsigned long ea )
+{
+	pgd_t *pg = pgdir + pgd_index( ea );
+	pmd_t *pm;
+	pte_t *pt;
+
+	/* Walk pgd -> pmd -> pte, bailing out at the first missing level */
+	if ( pgd_none( *pg ) )
+		return NULL;
+
+	pm = pmd_offset( pg, ea );
+	if ( pmd_none( *pm ) )
+		return NULL;
+
+	pt = pte_offset( pm, ea );
+	if ( ! pte_present( *pt ) )
+		return NULL;
+
+	return pt;
+}
+
+/* Derive the HPTE pp (page protection) bits from a Linux pte value.
+ * The expression combines _PAGE_USER (bit 1 of the result) with a low
+ * bit that is clear only when both _PAGE_RW (pte bit 2) and _PAGE_DIRTY
+ * (pte bit 7) are set for a user page - i.e. a user page that is not
+ * both writable and dirty gets a more restrictive (read-only) setting,
+ * which is how the software DIRTY-bit tracking forces a fault on the
+ * first store.
+ */
+static inline unsigned long computeHptePP( unsigned long pte )
+{
+	return ( pte & _PAGE_USER ) |
+		( ( ( pte & _PAGE_USER ) >> 1 ) &
+		  ( ( ~( ( pte >> 2 ) &	/* _PAGE_RW */
+			 ( pte >> 7 ) ) ) &	/* _PAGE_DIRTY */
+		    1 ) );
+}
+
+/* Change the pp (protection) bits of an existing HPTE.  On iSeries the
+ * hypervisor owns the hash table and handles invalidation/ordering, so
+ * this is a single call (va is unused here).
+ */
+static void hpte_updatepp_iSeries(long slot, unsigned long newpp, unsigned long va)
+{
+	HvCallHpt_setPp( slot, newpp );
+}
+
+/* Change the pp (protection) bits of an existing HPTE.  The entry is
+ * temporarily invalidated (and the TLB entry flushed) so the hardware
+ * never sees a half-updated entry, then revalidated with the new bits.
+ * Callers are expected to hold hash_table_lock.
+ */
+static void hpte_updatepp_pSeries(long slot, unsigned long newpp, unsigned long va)
+{
+	/* Local copy of first doubleword of HPTE */
+	union {
+		unsigned long d;
+		Hpte_dword0 h;
+	} hpte_dw0;
+
+	/* Local copy of second doubleword of HPTE */
+	union {
+		unsigned long d;
+		Hpte_dword1 h;
+		Hpte_dword1_flags f;
+	} hpte_dw1;
+
+	HPTE * hptep = htab_data.htab + slot;
+
+	/* Turn off valid bit in HPTE */
+	hpte_dw0.d = hptep->dw0.dword0;
+	hpte_dw0.h.v = 0;
+	hptep->dw0.dword0 = hpte_dw0.d;
+
+	/* Ensure it is out of the tlb too */
+	_tlbie( va );
+
+	/* Insert the new pp bits into the HPTE */
+	hpte_dw1.d = hptep->dw1.dword1;
+	hpte_dw1.h.pp = newpp;
+	hptep->dw1.dword1 = hpte_dw1.d;
+
+	/* Ensure it is visible before validating */
+	__asm__ __volatile__ ("eieio" : : : "memory");
+
+	/* Turn the valid bit back on in HPTE */
+	hpte_dw0.h.v = 1;
+	hptep->dw0.dword0 = hpte_dw0.d;
+
+	__asm__ __volatile__ ("ptesync" : : : "memory");
+}
+
+/*
+ * Update the page protection bits. Intended to be used to create
+ * guard pages for kernel data structures on pages which are bolted
+ * in the HPT. Assumes pages being operated on will not be stolen.
+ */
+void hpte_updateboltedpp_iSeries(unsigned long newpp, unsigned long ea )
+{
+	unsigned long vsid,va,vpn;
+	long slot;
+
+	vsid = get_kernel_vsid( ea );
+	va = ( vsid << 28 ) | ( ea & 0x0fffffff );
+	vpn = va >> PAGE_SHIFT;
+
+	/* NOTE(review): hpte_find can return -1 (not found) or a negative
+	 * (secondary-group) slot; neither case is handled before calling
+	 * the hypervisor - relies on the page being bolted and present.
+	 */
+	slot = ppc_md.hpte_find( vpn );
+	HvCallHpt_setPp( slot, newpp );
+}
+
+
+/* Atomically replace the low 2 bits (the pp field) of the HPTE's second
+ * doubleword with the low 2 bits of pp, using a ldarx/stdcx. loop so a
+ * concurrent hardware reference/change-bit update is not lost.
+ */
+static __inline__ void set_pp_bit(unsigned long pp, HPTE *addr)
+{
+	unsigned long old;
+	unsigned long *p = (unsigned long *)(&(addr->dw1));
+
+	__asm__ __volatile__(
+	/* rldimi inserts pp into bits 62-63 (the pp field) of the
+	 * reloaded doubleword; stdcx. retries if the reservation
+	 * was lost.
+	 */
+        "1:	ldarx	%0,0,%3\n\
+                rldimi  %0,%2,0,62\n\
+                stdcx.	%0,0,%3\n\
+                bne	1b"
+        : "=&r" (old), "=m" (*p)
+        : "r" (pp), "r" (p), "m" (*p)
+        : "cc");
+}
+
+/*
+ * Update the page protection bits. Intended to be used to create
+ * guard pages for kernel data structures on pages which are bolted
+ * in the HPT. Assumes pages being operated on will not be stolen.
+ */
+void hpte_updateboltedpp_pSeries(unsigned long newpp, unsigned long ea)
+{
+	unsigned long vsid,va,vpn,flags;
+	long slot;
+	HPTE *hptep;
+
+	vsid = get_kernel_vsid( ea );
+	va = ( vsid << 28 ) | ( ea & 0x0fffffff );
+	vpn = va >> PAGE_SHIFT;
+
+	/* NOTE(review): hpte_find can return -1 (not found) or a negative
+	 * (secondary-group) slot; neither case is checked here - relies
+	 * on the page being bolted into the primary group.
+	 */
+	slot = ppc_md.hpte_find( vpn );
+	hptep = htab_data.htab + slot;
+
+	/* Atomic pp update - no need to invalidate the entry first */
+	set_pp_bit(newpp , hptep);
+
+	/* Ensure it is out of the tlb too */
+	spin_lock_irqsave( &hash_table_lock, flags );
+	_tlbie( va );
+	spin_unlock_irqrestore( &hash_table_lock, flags );
+}
+
+
+
+/* This is called very early. */
+void hpte_init_iSeries(void)
+{
+ ppc_md.hpte_invalidate = hpte_invalidate_iSeries;
+ ppc_md.hpte_updatepp = hpte_updatepp_iSeries;
+ ppc_md.hpte_updateboltedpp = hpte_updateboltedpp_iSeries;
+ ppc_md.hpte_getword0 = hpte_getword0_iSeries;
+ ppc_md.hpte_selectslot = hpte_selectslot_iSeries;
+ ppc_md.hpte_create_valid = hpte_create_valid_iSeries;
+ ppc_md.hpte_find = hpte_find_iSeries;
+}
+/* Install the pSeries (kernel-managed hash table) implementations
+ * into the machine-dependent vector.  Called very early in boot.
+ */
+void hpte_init_pSeries(void)
+{
+	ppc_md.hpte_find           = hpte_find_pSeries;
+	ppc_md.hpte_create_valid   = hpte_create_valid_pSeries;
+	ppc_md.hpte_selectslot     = hpte_selectslot_pSeries;
+	ppc_md.hpte_getword0       = hpte_getword0_pSeries;
+	ppc_md.hpte_updateboltedpp = hpte_updateboltedpp_pSeries;
+	ppc_md.hpte_updatepp       = hpte_updatepp_pSeries;
+	ppc_md.hpte_invalidate     = hpte_invalidate_pSeries;
+}
+
+/* Handle a fault by adding an HPTE
+ * If the address can't be determined to be valid
+ * via Linux page tables, return 1. If handled
+ * return 0
+ */
+int hash_page( unsigned long ea, unsigned long access )
+{
+	int rc = 1;
+	void * pgdir = NULL;
+	unsigned long va, vsid, vpn;
+	unsigned long newpp, hash_ind, prpn;
+	unsigned long hpteflags, regionid;
+	long slot;
+	struct mm_struct * mm;
+	pte_t old_pte, new_pte, *ptep;
+
+	/* Check for invalid addresses. */
+	if (!IS_VALID_EA(ea)) {
+		return 1;
+	}
+
+	/* Pick the mm and vsid based on which region faulted */
+	regionid =  REGION_ID(ea);
+	switch ( regionid ) {
+	case USER_REGION_ID:
+		mm = current->mm;
+		if ( mm == NULL ) {
+			PPCDBG(PPCDBG_MM, "hash_page returning; mm = 0\n");
+			return 1;
+		}
+		vsid = get_vsid(mm->context, ea );
+		break;
+	case IO_REGION_ID:
+		mm = &ioremap_mm;
+		vsid = get_kernel_vsid( ea );
+		break;
+	case VMALLOC_REGION_ID:
+		mm = &init_mm;
+		vsid = get_kernel_vsid( ea );
+		break;
+#ifdef CONFIG_PPC_EEH
+	case IO_UNMAPPED_REGION_ID:
+		udbg_printf("EEH Error ea = 0x%lx\n", ea);
+		PPCDBG_ENTER_DEBUGGER();
+		panic("EEH Error ea = 0x%lx\n", ea);
+		break;
+#endif
+	case KERNEL_REGION_ID:
+		/* As htab_initialize is now, we shouldn't ever get here since
+		 * we're bolting the entire 0xC0... region.
+		 */
+		udbg_printf("Little faulted on kernel address 0x%lx\n", ea);
+		PPCDBG_ENTER_DEBUGGER();
+		panic("Little faulted on kernel address 0x%lx\n", ea);
+		break;
+	default:
+		/* Not a valid range, send the problem up to do_page_fault */
+		return 1;
+		break;
+	}
+
+	/* Search the Linux page table for a match with va */
+	va = ( vsid << 28 ) | ( ea & 0x0fffffff );
+	vpn = va >> PAGE_SHIFT;
+	pgdir = mm->pgd;
+	PPCDBG(PPCDBG_MM, "hash_page ea = 0x%16.16lx, va = 0x%16.16lx\n          current = 0x%16.16lx, access = %lx\n", ea, va, current, access);
+	if ( pgdir == NULL ) {
+		return 1;
+	}
+
+	/* Lock the Linux page table to prevent mmap and kswapd
+	 * from modifying entries while we search and update
+	 */
+
+	spin_lock( &mm->page_table_lock );
+
+	ptep = find_linux_pte( pgdir, ea );
+	/* If no pte found, send the problem up to do_page_fault */
+	if ( ! ptep ) {
+		spin_unlock( &mm->page_table_lock );
+		return 1;
+	}
+
+	/* Acquire the hash table lock to guarantee that the linux
+	 * pte we fetch will not change
+	 */
+	spin_lock( &hash_table_lock );
+
+	old_pte = *ptep;
+
+	/* If the pte is not "present" (valid), send the problem
+	 * up to do_page_fault.
+	 */
+	if ( ! pte_present( old_pte ) ) {
+		spin_unlock( &hash_table_lock );
+		spin_unlock( &mm->page_table_lock );
+		return 1;
+	}
+
+	/* At this point we have found a pte (which was present).
+	 * The spinlocks prevent this status from changing
+	 * The hash_table_lock prevents the _PAGE_HASHPTE status
+	 * from changing (RPN, DIRTY and ACCESSED too)
+	 * The page_table_lock prevents the pte from being
+	 * invalidated or modified
+	 */
+
+/* At this point, we have a pte (old_pte) which can be used to build or update
+ * an HPTE. There are 5 cases:
+ *
+ * 1. There is a valid (present) pte with no associated HPTE (this is
+ *	the most common case)
+ * 2. There is a valid (present) pte with an associated HPTE. The
+ *	current values of the pp bits in the HPTE prevent access because the
+ *	user doesn't have appropriate access rights.
+ * 3. There is a valid (present) pte with an associated HPTE. The
+ *	current values of the pp bits in the HPTE prevent access because we are
+ *	doing software DIRTY bit management and the page is currently not DIRTY.
+ * 4. This is a Kernel address (0xC---) for which there is no page directory.
+ *	There is an HPTE for this page, but the pp bits prevent access.
+ *	Since we always set up kernel pages with R/W access for the kernel
+ *	this case only comes about for users trying to access the kernel.
+ *	This case is always an error and is not dealt with further here.
+ * 5. This is a Kernel address (0xC---) for which there is no page directory.
+ *	There is no HPTE for this page.
+ *
+ * Check the user's access rights to the page. If access should be prevented
+ * then send the problem up to do_page_fault.
+ */
+
+	access |= _PAGE_PRESENT;
+	if ( 0 == ( access & ~(pte_val(old_pte)) ) ) {
+		/*
+		 * Check if pte might have an hpte, but we have
+		 * no slot information
+		 */
+		if ( pte_val(old_pte) & _PAGE_HPTENOIX ) {
+			/* hpte_find returns a negative slot number for a
+			 * secondary-group match, so this local must be
+			 * signed: with the previous "unsigned long" the
+			 * "< 0" test below could never be true and the
+			 * secondary flag/slot were recorded incorrectly.
+			 */
+			long hslot;
+			pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
+			hslot = ppc_md.hpte_find( vpn );
+			if ( hslot != -1 ) {
+				if ( hslot < 0 ) {
+					pte_val(old_pte) |= _PAGE_SECONDARY;
+					hslot = -hslot;
+				}
+				pte_val(old_pte) |= ((hslot << 12) & _PAGE_GROUP_IX) | _PAGE_HASHPTE;
+
+			}
+		}
+
+		/* User has appropriate access rights. */
+		new_pte = old_pte;
+		/* If the attempted access was a store */
+		if ( access & _PAGE_RW )
+			pte_val(new_pte) |= _PAGE_ACCESSED |
+				_PAGE_DIRTY;
+		else
+			pte_val(new_pte) |= _PAGE_ACCESSED;
+
+		/* Only cases 1, 3 and 5 still in play */
+
+		newpp = computeHptePP( pte_val(new_pte) );
+
+		/* Check if pte already has an hpte (case 3) */
+		if ( pte_val(old_pte) & _PAGE_HASHPTE ) {
+			/* There MIGHT be an HPTE for this pte */
+			unsigned long hash, slot, secondary;
+			/* Local copy of first doubleword of HPTE */
+			union {
+				unsigned long d;
+				Hpte_dword0 h;
+			} hpte_dw0;
+			hash = hpt_hash(vpn, 0);
+			secondary = (pte_val(old_pte) & _PAGE_SECONDARY) >> 15;
+			if ( secondary )
+				hash = ~hash;
+			slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
+			slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
+			/* If there is an HPTE for this page it is indexed by slot */
+			hpte_dw0.d = ppc_md.hpte_getword0( slot );
+			if ( (hpte_dw0.h.avpn == (vpn >> 11) ) &&
+			     (hpte_dw0.h.v) &&
+			     (hpte_dw0.h.h == secondary ) ){
+				/* HPTE matches */
+				ppc_md.hpte_updatepp( slot, newpp, va );
+				if ( !pte_same( old_pte, new_pte ) )
+					*ptep = new_pte;
+			}
+			else {
+				/* HPTE is not for this pte */
+				pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
+			}
+		}
+		if ( !( pte_val(old_pte) & _PAGE_HASHPTE ) ) {
+			/* Cases 1 and 5 */
+			/* For these cases we need to create a new
+			 * HPTE and update the linux pte (for
+			 * case 1). For case 5 there is no linux pte.
+			 *
+			 * Find an available HPTE slot
+			 */
+			slot = ppc_md.hpte_selectslot( vpn );
+
+			/* Debug code */
+			if ( slot == 0x8000000000000000 ) {
+				unsigned long xold_pte = pte_val(old_pte);
+				unsigned long xnew_pte = pte_val(new_pte);
+
+				udbg_printf("hash_page: ptep    = 0x%016lx\n", (unsigned long)ptep );
+				udbg_printf("hash_page: old_pte = 0x%016lx\n", xold_pte );
+				udbg_printf("hash_page: new_pte = 0x%016lx\n", xnew_pte );
+				udbg_printf("hash_page: ea      = 0x%016lx\n", ea );
+				udbg_printf("hash_page: va      = 0x%016lx\n", va );
+				udbg_printf("hash_page: access  = 0x%016lx\n", access );
+
+				panic("hash_page: hpte already exists\n");
+			}
+
+			/* Negative slot means the secondary group was used */
+			hash_ind = 0;
+			if ( slot < 0 ) {
+				slot = -slot;
+				hash_ind = 1;
+			}
+
+			/* Set the physical address */
+			prpn = pte_val(old_pte) >> PTE_SHIFT;
+
+			if ( ptep ) {
+				/* Update the linux pte with the HPTE slot */
+				pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
+				pte_val(new_pte) |= hash_ind << 15;
+				pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX;
+				pte_val(new_pte) |= _PAGE_HASHPTE;
+				/* No need to use ldarx/stdcx here because all
+				 * who might be updating the pte will hold the page_table_lock
+				 * or the hash_table_lock (we hold both)
+				 */
+				*ptep = new_pte;
+			}
+
+			/* copy appropriate flags from linux pte */
+			hpteflags = (pte_val(new_pte) & 0x1f8) | newpp;
+
+			/* Create the HPTE */
+			ppc_md.hpte_create_valid( slot, vpn, prpn, hash_ind, ptep, hpteflags, 0 );
+
+		}
+
+		/* Indicate success */
+		rc = 0;
+	}
+
+	spin_unlock( &hash_table_lock );
+	if (ptep)
+		spin_unlock( &mm->page_table_lock );
+
+	return rc;
+}
+
+/* Invalidate the HPTE (if any) that maps the page at ea for the given
+ * context, using the slot/secondary information cached in the Linux pte.
+ */
+void flush_hash_page( unsigned long context, unsigned long ea, pte_t pte )
+{
+	unsigned long vsid, vpn, va, hash, secondary, slot, flags;
+	/* Local copy of first doubleword of HPTE */
+	union {
+		unsigned long d;
+		Hpte_dword0 h;
+	} hpte_dw0;
+
+	if ( (ea >= USER_START ) && ( ea <= USER_END ) )
+		vsid = get_vsid( context, ea );
+	else
+		vsid = get_kernel_vsid( ea );
+	va = (vsid << 28) | (ea & 0x0fffffff);
+	vpn = va >> PAGE_SHIFT;
+	/* Recompute the slot from the hash plus the group index and
+	 * secondary flag cached in the pte by hash_page
+	 */
+	hash = hpt_hash(vpn, 0);
+	secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
+	if ( secondary )
+		hash = ~hash;
+	slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
+	slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
+	/* If there is an HPTE for this page it is indexed by slot */
+
+	spin_lock_irqsave( &hash_table_lock, flags);
+	hpte_dw0.d = ppc_md.hpte_getword0( slot );
+	if ( (hpte_dw0.h.avpn == (vpn >> 11) ) &&
+	     (hpte_dw0.h.v) &&
+	     (hpte_dw0.h.h == secondary ) ){
+		/* HPTE matches */
+		ppc_md.hpte_invalidate( slot );
+	}
+	else {
+		unsigned k;
+		/* Temporarily lets check for the hpte in all possible slots.
+		 * This is debug-only consistency checking: finding the HPTE
+		 * anywhere else means the slot cached in the pte is wrong.
+		 */
+		for ( secondary = 0; secondary < 2; ++secondary ) {
+			hash = hpt_hash(vpn, 0);
+			if ( secondary )
+				hash = ~hash;
+			slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
+			for ( k=0; k<8; ++k ) {
+				hpte_dw0.d = ppc_md.hpte_getword0( slot+k );
+				if ( ( hpte_dw0.h.avpn == (vpn >> 11) ) &&
+				     ( hpte_dw0.h.v ) &&
+				     ( hpte_dw0.h.h == secondary ) ) {
+					/* Deliberate hang (with irqs off) so
+					 * the inconsistency can be examined
+					 * from the debugger - debug aid only.
+					 */
+					while (1) ;
+				}
+			}
+		}
+
+	}
+	spin_unlock_irqrestore( &hash_table_lock, flags );
+}
+
+/* sysctl handler for the L2CR (L2 cache control register) value.
+ * On read it formats the register fields as human-readable text; on
+ * write it parses an integer (the actual register update is currently
+ * stubbed out pending a ppc64 _set_L2CR definition).
+ * Only valid on 750 (PVR 8) and 7400 (PVR 12) processors.
+ */
+int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
+		   void *buffer, size_t *lenp)
+{
+	/* val = 0: without this, the read path below formats an
+	 * uninitialized value while the _get_L2CR() call is #if 0'd out.
+	 */
+	int vleft, first=1, len, left, val = 0;
+	#define TMPBUFLEN 256
+	char buf[TMPBUFLEN], *p;
+	static const char *sizestrings[4] = {
+		"2MB", "256KB", "512KB", "1MB"
+	};
+	static const char *clockstrings[8] = {
+		"clock disabled", "+1 clock", "+1.5 clock", "reserved(3)",
+		"+2 clock", "+2.5 clock", "+3 clock", "reserved(7)"
+	};
+	static const char *typestrings[4] = {
+		"flow-through burst SRAM", "reserved SRAM",
+		"pipelined burst SRAM", "pipelined late-write SRAM"
+	};
+	static const char *holdstrings[4] = {
+		"0.5", "1.0", "(reserved2)", "(reserved3)"
+	};
+
+	if ( ((_get_PVR() >> 16) != 8) && ((_get_PVR() >> 16) != 12))
+		return -EFAULT;
+
+	if ( /*!table->maxlen ||*/ (filp->f_pos && !write)) {
+		*lenp = 0;
+		return 0;
+	}
+
+	vleft = table->maxlen / sizeof(int);
+	left = *lenp;
+
+	for (; left /*&& vleft--*/; first=0) {
+		if (write) {
+			/* Skip leading whitespace in the user buffer */
+			while (left) {
+				char c;
+				if(get_user(c,(char *) buffer))
+					return -EFAULT;
+				if (!isspace(c))
+					break;
+				left--;
+				buffer = (char *) buffer + 1;
+			}
+			if (!left)
+				break;
+			len = left;
+			if (len > TMPBUFLEN-1)
+				len = TMPBUFLEN-1;
+			if(copy_from_user(buf, buffer, len))
+				return -EFAULT;
+			buf[len] = 0;
+			p = buf;
+			if (*p < '0' || *p > '9')
+				break;
+			val = simple_strtoul(p, &p, 0);
+			len = p-buf;
+			if ((len < left) && *p && !isspace(*p))
+				break;
+			buffer += len;
+			left -= len;
+#if 0
+			/* DRENG need a def */
+			_set_L2CR(0);
+			_set_L2CR(val);
+			while ( _get_L2CR() & 0x1 )
+				/* wait for invalidate to finish */;
+#endif
+
+		} else {
+			p = buf;
+			if (!first)
+				*p++ = '\t';
+#if 0
+			/* DRENG need a def */
+			val = _get_L2CR();
+#endif
+			p += sprintf(p, "0x%08x: ", val);
+			p += sprintf(p, " %s", (val >> 31) & 1 ? "enabled" :
+				     "disabled");
+			p += sprintf(p, ", %sparity", (val>>30)&1 ? "" : "no ");
+			p += sprintf(p, ", %s", sizestrings[(val >> 28) & 3]);
+			p += sprintf(p, ", %s", clockstrings[(val >> 25) & 7]);
+			/* 2-bit SRAM-type field: mask must be 3 (the old
+			 * "& 2" could only ever index entries 0 and 2).
+			 */
+			p += sprintf(p, ", %s", typestrings[(val >> 23) & 3]);
+			p += sprintf(p, "%s", (val>>22)&1 ? ", data only" : "");
+			p += sprintf(p, "%s", (val>>20)&1 ? ", ZZ enabled": "");
+			p += sprintf(p, ", %s", (val>>19)&1 ? "write-through" :
+				     "copy-back");
+			p += sprintf(p, "%s", (val>>18)&1 ? ", testing" : "");
+			p += sprintf(p, ", %sns hold",holdstrings[(val>>16)&3]);
+			p += sprintf(p, "%s", (val>>15)&1 ? ", DLL slow" : "");
+			p += sprintf(p, "%s", (val>>14)&1 ? ", diff clock" :"");
+			p += sprintf(p, "%s", (val>>13)&1 ? ", DLL bypass" :"");
+
+			p += sprintf(p,"\n");
+
+			len = strlen(buf);
+			if (len > left)
+				len = left;
+			if(copy_to_user(buffer, buf, len))
+				return -EFAULT;
+			left -= len;
+			buffer += len;
+			break;
+		}
+	}
+
+	if (!write && !first && left) {
+		if(put_user('\n', (char *) buffer))
+			return -EFAULT;
+		left--, buffer++;
+	}
+	if (write) {
+		p = (char *) buffer;
+		/* Consume any trailing whitespace */
+		while (left) {
+			char c;
+			if(get_user(c, p++))
+				return -EFAULT;
+			if (!isspace(c))
+				break;
+			left--;
+		}
+	}
+	if (write && first)
+		return -EINVAL;
+	*lenp -= left;
+	filp->f_pos += *lenp;
+	return 0;
+}
+
--- /dev/null
+/*
+ * arch/ppc64/kernel/hvCall.S
+ *
+ *
+ * This file contains the code to perform calls to the
+ * iSeries LPAR hypervisor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ppc_asm.h"
+#include <asm/processor.h>
+#include <linux/config.h>
+
+ .text
+
+/*
+ * Hypervisor call
+ *
+ * Invoke the iSeries hypervisor via the System Call instruction
+ * Parameters are passed to this routine in registers r3 - r10
+ *
+ * r3 contains the HV function to be called
+ * r4-r10 contain the operands to the hypervisor function
+ *
+ */
+
+/*
+ * Plain hypervisor call: r3 = HV function token, r4-r10 = operands,
+ * result returned in r3.  All the HvCallN entry points share one body;
+ * unused operand registers are simply ignored by the hypervisor.
+ */
+_GLOBAL(HvCall)
+_GLOBAL(HvCall0)
+_GLOBAL(HvCall1)
+_GLOBAL(HvCall2)
+_GLOBAL(HvCall3)
+_GLOBAL(HvCall4)
+_GLOBAL(HvCall5)
+_GLOBAL(HvCall6)
+_GLOBAL(HvCall7)
+
+
+	/* Save the caller's CR just below the current stack pointer,
+	 * inside the 16 bytes the stdu below reserves for it.
+	 */
+	mfcr	r0
+	std	r0,-8(r1)
+	stdu	r1,-(STACK_FRAME_OVERHEAD+16)(r1)
+
+	/* r0 = 0xffffffffffffffff indicates a hypervisor call */
+
+	li	r0,-1
+
+	/* Invoke the hypervisor via the sc instruction */
+
+	sc
+
+	/* Pop the frame and restore the caller's CR */
+	ld	r1,0(r1)
+	ld	r0,-8(r1)
+	mtcrf	0xff,r0
+
+	/* return to caller, return value in r3 */
+
+	blr
+
+/*
+ * Hypervisor call returning 16 bytes of data.
+ * r3 = HV function token, r4 = pointer to a 16-byte return area,
+ * r5-r10 = operands (shifted down one register before the call).
+ * The hypervisor's r3/r4 results are stored at the return area and
+ * its r5 result becomes this routine's return value.
+ */
+_GLOBAL(HvCall0Ret16)
+_GLOBAL(HvCall1Ret16)
+_GLOBAL(HvCall2Ret16)
+_GLOBAL(HvCall3Ret16)
+_GLOBAL(HvCall4Ret16)
+_GLOBAL(HvCall5Ret16)
+_GLOBAL(HvCall6Ret16)
+_GLOBAL(HvCall7Ret16)
+
+	/* Save caller's CR and the non-volatile r31 below the stack
+	 * pointer, inside the 32 bytes the stdu below reserves.
+	 */
+	mfcr	r0
+	std	r0,-8(r1)
+	std	r31,-16(r1)
+	stdu	r1,-(STACK_FRAME_OVERHEAD+32)(r1)
+
+	/* Keep the return-area pointer in r31 across the call and
+	 * shift the operands down into r4-r9.  r0 = -1 flags a
+	 * hypervisor call.
+	 */
+	mr	r31,r4
+	li	r0,-1
+	mr	r4,r5
+	mr	r5,r6
+	mr	r6,r7
+	mr	r7,r8
+	mr	r8,r9
+	mr	r9,r10
+
+	sc
+
+	/* Store the first two result doublewords at the return area */
+	std	r3,0(r31)
+	std	r4,8(r31)
+
+	/* Third result doubleword is the function return value */
+	mr	r3,r5
+
+	/* Pop the frame, restore CR and r31 */
+	ld	r1,0(r1)
+	ld	r0,-8(r1)
+	mtcrf	0xff,r0
+	ld	r31,-16(r1)
+
+	blr
+
+
--- /dev/null
+/*
+ * c 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/cache.h>
+#include <asm/io.h>
+#include "i8259.h"
+#include <linux/irq.h>
+#include <asm/ppcdebug.h>
+
+/* Cached interrupt-mask bytes for the two cascaded 8259 controllers:
+ * cached_8259[0] shadows the slave mask port (0xA1), cached_8259[1]
+ * the master mask port (0x21).  All bits start at 1 = all masked.
+ */
+unsigned char cached_8259[2] = { 0xff, 0xff };
+#define cached_A1 (cached_8259[0])
+#define cached_21 (cached_8259[1])
+
+/* Serializes access to the PIC I/O ports and the cached masks */
+static spinlock_t i8259_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+
+/* Offset of the 8259 interrupts within the global irq number space */
+int i8259_pic_irq_offset;
+
+/* Poll the cascaded 8259 pair for the highest-priority pending
+ * interrupt.  Returns the irq number (0-15, cascade-adjusted) or -1
+ * for a spurious interrupt.
+ */
+int i8259_irq(int cpu)
+{
+	int irq;
+
+	spin_lock/*_irqsave*/(&i8259_lock/*, flags*/);
+	/*
+	 * Perform an interrupt acknowledge cycle on controller 1
+	 * (OCW3 poll command, then read the vector from port 0x20)
+	 */
+	outb(0x0C, 0x20);
+	irq = inb(0x20) & 7;
+	if (irq == 2)
+	{
+		/*
+		 * Interrupt is cascaded so perform interrupt
+		 * acknowledge on controller 2
+		 */
+		outb(0x0C, 0xA0);
+		irq = (inb(0xA0) & 7) + 8;
+	}
+	else if (irq==7)
+	{
+		/*
+		 * This may be a spurious interrupt
+		 *
+		 * Read the interrupt status register. If the most
+		 * significant bit is not set then there is no valid
+		 * interrupt
+		 */
+		outb(0x0b, 0x20);
+		if(~inb(0x20)&0x80) {
+			spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
+			return -1;
+		}
+	}
+	spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
+	return irq;
+}
+
+/* Mask the given irq in the cached mask, write the mask to the PIC and
+ * issue the (non-specific) EOI - for a slave irq the cascade on the
+ * master must be EOI'd as well.
+ */
+static void i8259_mask_and_ack_irq(unsigned int irq_nr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8259_lock, flags);
+	/* Convert from the global irq number to the PIC-local 0-15 range */
+	if ( irq_nr >= i8259_pic_irq_offset )
+		irq_nr -= i8259_pic_irq_offset;
+
+	if (irq_nr > 7) {
+		cached_A1 |= 1 << (irq_nr-8);
+		inb(0xA1);	/* DUMMY read to settle the bus */
+		outb(cached_A1,0xA1);
+		outb(0x20,0xA0);	/* Non-specific EOI */
+		outb(0x20,0x20);	/* Non-specific EOI to cascade */
+	} else {
+		cached_21 |= 1 << irq_nr;
+		inb(0x21);	/* DUMMY read to settle the bus */
+		outb(cached_21,0x21);
+		outb(0x20,0x20);	/* Non-specific EOI */
+	}
+	spin_unlock_irqrestore(&i8259_lock, flags);
+}
+
+/* Write both cached mask bytes out to the PIC mask ports.  irq_nr is
+ * unused - both controllers are always rewritten.  Caller must hold
+ * i8259_lock.
+ */
+static void i8259_set_irq_mask(int irq_nr)
+{
+	outb(cached_A1,0xA1);
+	outb(cached_21,0x21);
+}
+
+/* Disable an irq line: set its bit in the appropriate cached mask
+ * (master for 0-7, slave for 8-15) and push the masks to the hardware.
+ */
+static void i8259_mask_irq(unsigned int irq_nr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8259_lock, flags);
+	/* Convert from the global irq number to the PIC-local 0-15 range */
+	if ( irq_nr >= i8259_pic_irq_offset )
+		irq_nr -= i8259_pic_irq_offset;
+	if ( irq_nr > 7 )
+		cached_A1 |= 1 << (irq_nr-8);
+	else
+		cached_21 |= 1 << irq_nr;
+	i8259_set_irq_mask(irq_nr);
+	spin_unlock_irqrestore(&i8259_lock, flags);
+}
+
+/* Enable an irq line: clear its bit in the appropriate cached mask
+ * (master for 0-7, slave for 8-15) and push the masks to the hardware.
+ */
+static void i8259_unmask_irq(unsigned int irq_nr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8259_lock, flags);
+	/* Convert from the global irq number to the PIC-local 0-15 range */
+	if ( irq_nr >= i8259_pic_irq_offset )
+		irq_nr -= i8259_pic_irq_offset;
+	if ( irq_nr > 7 )
+		cached_A1 &= ~(1 << (irq_nr-8));
+	else
+		cached_21 &= ~(1 << irq_nr);
+	i8259_set_irq_mask(irq_nr);
+	spin_unlock_irqrestore(&i8259_lock, flags);
+}
+
+/* End-of-interrupt hook: re-enable the line unless it was disabled or
+ * is still being handled.
+ */
+static void i8259_end_irq(unsigned int irq)
+{
+	unsigned int status = irq_desc[irq].status;
+
+	if ((status & (IRQ_DISABLED|IRQ_INPROGRESS)) == 0)
+		i8259_unmask_irq(irq);
+}
+
+/* irq controller descriptor for the cascaded 8259 pair.
+ * Positional initializer - member names below are per the 2.4
+ * hw_interrupt_type layout (NOTE(review): confirm against the header).
+ */
+struct hw_interrupt_type i8259_pic = {
+	" i8259    ",		/* typename */
+	NULL,			/* startup */
+	NULL,			/* shutdown */
+	i8259_unmask_irq,	/* enable */
+	i8259_mask_irq,		/* disable */
+	i8259_mask_and_ack_irq,	/* ack */
+	i8259_end_irq,		/* end */
+	NULL			/* set_affinity */
+};
+
+/* Program both 8259s through their ICW1-ICW4 initialization sequence
+ * (edge triggered, cascade on IRQ2, 8086 mode), then apply the cached
+ * masks and reserve the cascade line so nothing else can claim it.
+ */
+void __init i8259_init(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8259_lock, flags);
+	/* init master interrupt controller */
+	outb(0x11, 0x20); /* Start init sequence */
+	outb(0x00, 0x21); /* Vector base */
+	outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */
+	outb(0x01, 0x21); /* Select 8086 mode */
+	outb(0xFF, 0x21); /* Mask all */
+	/* init slave interrupt controller */
+	outb(0x11, 0xA0); /* Start init sequence */
+	outb(0x08, 0xA1); /* Vector base */
+	outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
+	outb(0x01, 0xA1); /* Select 8086 mode */
+	outb(0xFF, 0xA1); /* Mask all */
+	/* Apply the cached masks (initially all masked) */
+	outb(cached_A1, 0xA1);
+	outb(cached_21, 0x21);
+	spin_unlock_irqrestore(&i8259_lock, flags);
+	/* Reserve the cascade irq so drivers cannot request it */
+	request_irq( i8259_pic_irq_offset + 2, no_action, SA_INTERRUPT,
+		     "82c59 secondary cascade", NULL );
+
+}
--- /dev/null
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _PPC_KERNEL_i8259_H
+#define _PPC_KERNEL_i8259_H
+
+#include "local_irq.h"
+
+/* irq controller descriptor for the cascaded 8259 pair (i8259.c) */
+extern struct hw_interrupt_type i8259_pic;
+
+/* Program both 8259s and reserve the cascade irq */
+void i8259_init(void);
+/* Poll for the pending 8259 irq; returns irq number or -1 if spurious */
+int i8259_irq(int);
+
+#endif /* _PPC_KERNEL_i8259_H */
--- /dev/null
+/************************************************************************/
+/* This module supports the iSeries I/O Address translation mapping */
+/* Copyright (C) 20yy <Allan H Trautman> <IBM Corp> */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created, December 14, 2000 */
+/* Added Bar table for IoMm performance. */
+/* Ported to ppc64 */
+/* Added dynamic table allocation */
+/* End Change Activity */
+/************************************************************************/
+#include <asm/types.h>
+#include <asm/resource.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <asm/ppcdebug.h>
+#include <asm/flight_recorder.h>
+#include <asm/iSeries/HvCallPci.h>
+#include <asm/iSeries/iSeries_pci.h>
+
+#include "iSeries_IoMmTable.h"
+#include "pci.h"
+
+/*******************************************************************/
+/* Table defines                                                   */
+/* Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space. */
+/*******************************************************************/
+#define Max_Entries 1024
+/* Size of one translation-table entry's I/O window (4 MB) */
+unsigned long iSeries_IoMmTable_Entry_Size = 0x0000000000400000;
+/* First I/O-mapped address handed out */
+unsigned long iSeries_Base_Io_Memory      = 0xE000000000000000;
+/* One past the highest address allocated so far (grows with use) */
+unsigned long iSeries_Max_Io_Memory       = 0xE000000000000000;
+/* Next free index in the tables below */
+static   long iSeries_CurrentIndex        = 0;
+
+/*******************************************************************/
+/* Lookup Tables.                                                  */
+/* IoMmTable[i] = device owning the i-th 4MB window;               */
+/* IoBarTable[i] = which BAR of that device the window belongs to. */
+/*******************************************************************/
+struct iSeries_Device_Node** iSeries_IoMmTable;
+u8*                          iSeries_IoBarTable;
+
+/*******************************************************************/
+/* Static and Global variables                                     */
+/*******************************************************************/
+static char* iSeriesPciIoText  = "iSeries PCI I/O";
+/* Protects the tables and iSeries_CurrentIndex */
+static spinlock_t iSeriesIoMmTableLock = SPIN_LOCK_UNLOCKED;
+
+/*******************************************************************/
+/* iSeries_IoMmTable_Initialize */
+/*******************************************************************/
+/* Allocates and initializes the Address Translation Table and Bar */
+/* Tables to get them ready for use.  Must be called before any    */
+/* I/O space is handed out to the device BARs.                     */
+/* A follow-up method, iSeries_IoMmTable_Status, can be called to  */
+/* adjust the table after the device BARs have been assigned, to   */
+/* resize the table.                                               */
+/*******************************************************************/
+void iSeries_IoMmTable_Initialize(void)
+{
+	/* kmalloc(GFP_KERNEL) may sleep, so the allocations must be done
+	 * BEFORE taking the spinlock (sleeping with a spinlock held can
+	 * deadlock); only the publication of the pointers is locked.
+	 */
+	struct iSeries_Device_Node** mmTable;
+	u8* barTable;
+
+	mmTable  = kmalloc(sizeof(void*)*Max_Entries,GFP_KERNEL);
+	barTable = kmalloc(sizeof(u8)*Max_Entries, GFP_KERNEL);
+
+	spin_lock(&iSeriesIoMmTableLock);
+	iSeries_IoMmTable  = mmTable;
+	iSeries_IoBarTable = barTable;
+	spin_unlock(&iSeriesIoMmTableLock);
+	PCIFR("IoMmTable Initialized 0x%p", iSeries_IoMmTable);
+	if(iSeries_IoMmTable == NULL || iSeries_IoBarTable == NULL) {
+		panic("PCI: I/O tables allocation failed.\n");
+	}
+}
+
+/*******************************************************************/
+/* iSeries_IoMmTable_AllocateEntry */
+/*******************************************************************/
+/* Adds pci_dev entry in address translation table */
+/*******************************************************************/
+/* - Allocates the number of entries required in table base on BAR */
+/* size. */
+/* - Allocates starting at iSeries_Base_Io_Memory and increases. */
+/* - The size is round up to be a multiple of entry size. */
+/* - CurrentIndex is incremented to keep track of the last entry. */
+/* - Builds the resource entry for allocated BARs. */
+/*******************************************************************/
+static void iSeries_IoMmTable_AllocateEntry(struct pci_dev* PciDev, int BarNumber)
+{
+	struct resource* BarResource = &PciDev->resource[BarNumber];
+	long             BarSize     = pci_resource_len(PciDev,BarNumber);
+	/***********************************************************/
+	/* No space to allocate, quick exit, skip Allocation.      */
+	/***********************************************************/
+	if(BarSize == 0) return;
+	/***********************************************************/
+	/* Set Resource values.                                    */
+	/***********************************************************/
+	spin_lock(&iSeriesIoMmTableLock);
+	BarResource->name  = iSeriesPciIoText;
+	BarResource->start = iSeries_IoMmTable_Entry_Size*iSeries_CurrentIndex;
+	BarResource->start+= iSeries_Base_Io_Memory;
+	BarResource->end   = BarResource->start+BarSize-1;
+	/***********************************************************/
+	/* Allocate the number of table entries needed for BAR.    */
+	/* Guard against running off the end of the fixed-size     */
+	/* tables - the old code incremented without bounds and    */
+	/* would corrupt memory past the 1024-entry tables.        */
+	/***********************************************************/
+	while (BarSize > 0 ) {
+		if (iSeries_CurrentIndex >= Max_Entries) {
+			spin_unlock(&iSeriesIoMmTableLock);
+			panic("PCI: iSeries I/O address table overflow.\n");
+		}
+		*(iSeries_IoMmTable +iSeries_CurrentIndex) = (struct iSeries_Device_Node*)PciDev->sysdata;
+		*(iSeries_IoBarTable+iSeries_CurrentIndex) = BarNumber;
+		BarSize -= iSeries_IoMmTable_Entry_Size;
+		++iSeries_CurrentIndex;
+	}
+	iSeries_Max_Io_Memory = (iSeries_IoMmTable_Entry_Size*iSeries_CurrentIndex)+iSeries_Base_Io_Memory;
+	spin_unlock(&iSeriesIoMmTableLock);
+}
+
+/*******************************************************************/
+/* iSeries_allocateDeviceBars                                      */
+/*******************************************************************/
+/* - Allocates ALL pci_dev BARs and updates the resources with the */
+/*   BAR value.  BARs with zero length will not have resources.    */
+/*   iSeries_IoMmTable_AllocateEntry is called to allocate each    */
+/*   entry (it skips zero-length BARs itself).                     */
+/* - Loops through the BAR resources (0 - 5) and includes the ROM, */
+/*   which is resource (6).                                        */
+/*******************************************************************/
+void iSeries_allocateDeviceBars(struct pci_dev* PciDev)
+{
+ int BarNumber;
+ /* Removed an unused local that was assigned but never read. */
+ for(BarNumber = 0; BarNumber <= PCI_ROM_RESOURCE; ++BarNumber) {
+  iSeries_IoMmTable_AllocateEntry(PciDev, BarNumber);
+ }
+}
+
+/************************************************************************/
+/* Translates the IoAddress to the device that is mapped to IoSpace.    */
+/* This code is inlined, see the iSeries_pci.c file for the replacement.*/
+/* NOTE: this is deliberately a stub that always returns NULL; the real */
+/* translation lives in iSeries_pci.c and this symbol only satisfies    */
+/* the interface declared in iSeries_IoMmTable.h.                       */
+/************************************************************************/
+struct iSeries_Device_Node* iSeries_xlateIoMmAddress(void* IoAddress)
+{
+ return NULL;
+}
+
+/************************************************************************
+ * Status hook for IoMmTable
+ *
+ * Logs the table pointer and the pseudo I/O range currently in use.
+ ************************************************************************/
+void iSeries_IoMmTable_Status(void)
+{
+ PCIFR("IoMmTable......: 0x%p",iSeries_IoMmTable);
+ /* Base/Max are unsigned long values, not pointers: use %lx, not %p. */
+ PCIFR("IoMmTable Range: 0x%lx to 0x%lx",iSeries_Base_Io_Memory,iSeries_Max_Io_Memory);
+}
--- /dev/null
+#ifndef _ISERIES_IOMMTABLE_H
+#define _ISERIES_IOMMTABLE_H
+/************************************************************************/
+/* File iSeries_IoMmTable.h created by Allan Trautman on Dec 12 2001. */
+/************************************************************************/
+/* Interfaces for the write/read Io address translation table. */
+/* Copyright (C) 20yy Allan H Trautman, IBM Corporation */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created December 12, 2000 */
+/* Ported to ppc64, August 30, 2001 */
+/* End Change Activity */
+/************************************************************************/
+
+struct pci_dev;
+struct iSeries_Device_Node;
+
+/* Table mapping each pseudo I/O chunk to its device node, plus the    */
+/* parallel table of BAR numbers.                                      */
+extern struct iSeries_Device_Node** iSeries_IoMmTable;
+extern u8* iSeries_IoBarTable;
+/* Bounds of the pseudo I/O address range handed out to device BARs.   */
+/* (A duplicate declaration of iSeries_Base_Io_Memory was removed.)    */
+extern unsigned long iSeries_Base_Io_Memory;
+extern unsigned long iSeries_Max_Io_Memory;
+extern unsigned long iSeries_IoMmTable_Entry_Size;
+/************************************************************************/
+/* iSeries_IoMmTable_Initialize */
+/************************************************************************/
+/* - Initalizes the Address Translation Table and get it ready for use. */
+/* Must be called before any client calls any of the other methods. */
+/* */
+/* Parameters: None. */
+/* */
+/* Return: None. */
+/************************************************************************/
+extern void iSeries_IoMmTable_Initialize(void);
+extern void iSeries_IoMmTable_Status(void);
+
+/************************************************************************/
+/* iSeries_allocateDeviceBars */
+/************************************************************************/
+/* - Allocates ALL pci_dev BAR's and updates the resources with the BAR */
+/* value. BARS with zero length will not have the resources. The */
+/* HvCallPci_getBarParms is used to get the size of the BAR space. */
+/* It calls iSeries_IoMmTable_AllocateEntry to allocate each entry. */
+/* */
+/* Parameters: */
+/* pci_dev = Pointer to pci_dev structure that will be mapped to pseudo */
+/* I/O Address. */
+/* */
+/* Return: */
+/* The pci_dev I/O resources updated with pseudo I/O Addresses. */
+/************************************************************************/
+extern void iSeries_allocateDeviceBars(struct pci_dev* );
+
+/************************************************************************/
+/* iSeries_xlateIoMmAddress */
+/************************************************************************/
+/* - Translates an I/O Memory address to the Device Node that has been  */
+/*   allocated the pseudo I/O Address.                                  */
+/* */
+/* Parameters: */
+/* IoAddress = I/O Memory Address. */
+/* */
+/* Return: */
+/* An iSeries_Device_Node to the device mapped to the I/O address. The*/
+/* BarNumber and BarOffset are valid if the Device Node is returned. */
+/************************************************************************/
+extern struct iSeries_Device_Node* iSeries_xlateIoMmAddress(void* IoAddress);
+
+#endif /* _ISERIES_IOMMTABLE_H */
--- /dev/null
+/************************************************************************/
+/* File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001. */
+/************************************************************************/
+/* This code gets the card location of the hardware */
+/* Copyright (C) 20yy <Allan H Trautman> <IBM Corp> */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created, Feb 2, 2001 */
+/* Ported to ppc64, August 20, 2001 */
+/* End Change Activity */
+/************************************************************************/
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/types.h>
+#include <asm/resource.h>
+
+#include <asm/iSeries/HvCallPci.h>
+#include <asm/iSeries/HvTypes.h>
+#include <asm/iSeries/mf.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/iSeries/HvCallPci.h>
+//#include <asm/iSeries/iSeries_VpdInfo.h>
+#include <asm/iSeries/iSeries_pci.h>
+#include "pci.h"
+
+/************************************************/
+/* Size of Bus VPD data */
+/************************************************/
+#define BUS_VPDSIZE 1024
+/************************************************/
+/* Bus Vpd Tags */
+/************************************************/
+#define VpdEndOfDataTag 0x78
+#define VpdEndOfAreaTag 0x79
+#define VpdIdStringTag 0x82
+#define VpdVendorAreaTag 0x84
+/************************************************/
+/* Mfg Area Tags */
+/************************************************/
+#define VpdFruFlag 0x4647 // "FG"
+#define VpdFruFrameId 0x4649 // "FI"
+#define VpdSlotMapFormat 0x4D46 // "MF"
+#define VpdAsmPartNumber 0x504E // "PN"
+#define VpdFruSerial 0x534E // "SN"
+#define VpdSlotMap 0x534D // "SM"
+
+/************************************************/
+/* Structures of the areas */
+/************************************************/
+/* One tagged entry in the Mfg VPD area: a 2-byte ASCII tag (e.g. "FI"),*/
+/* a length byte, then the data (first two bytes exposed here).        */
+struct MfgVpdAreaStruct {
+ u16 Tag;
+ u8 TagLength;
+ u8 AreaData1;
+ u8 AreaData2;
+};
+typedef struct MfgVpdAreaStruct MfgArea;
+/* Fixed header size (Tag + TagLength); sizeof() pads, so use this.    */
+#define MFG_ENTRY_SIZE 3
+
+/* One slot-map record from the "SM" area.  CardLocation is NOT NUL-   */
+/* terminated here; consumers copy 3 bytes and terminate themselves.   */
+struct SlotMapStruct {
+ u8 AgentId;
+ u8 SecondaryAgentId;
+ u8 PhbId;
+ char CardLocation[3];
+ char Parms[8];
+ char Reserved[2];
+};
+typedef struct SlotMapStruct SlotMap;
+/* On-wire record size; again deliberately not sizeof(SlotMap).        */
+#define SLOT_ENTRY_SIZE 16
+
+/****************************************************************
+ *                                                              *
+ * Builds a LocationData record (Bus, Card, Board, FrameId,     *
+ * CardLocation) for the given device.  Caller frees the result.*
+ * Returns NULL if the allocation fails.                        *
+ ****************************************************************/
+LocationData* iSeries_GetLocationData(struct pci_dev* PciDev)
+{
+ struct iSeries_Device_Node* DevNode = (struct iSeries_Device_Node*)PciDev->sysdata;
+ LocationData* Location = (LocationData*)kmalloc(LOCATION_DATA_SIZE, GFP_KERNEL);
+ if (Location == NULL) {
+  printk("PCI: LocationData area allocation failed!\n");
+  return NULL;
+ }
+ memset(Location, 0, LOCATION_DATA_SIZE);
+ /* Copy the identifying fields out of the device node. */
+ Location->Bus     = ISERIES_BUS(DevNode);
+ Location->Card    = PCI_SLOT(DevNode->DevFn);
+ Location->Board   = DevNode->Board;
+ Location->FrameId = DevNode->FrameId;
+ strcpy(Location->CardLocation, DevNode->CardLocation);
+ return Location;
+}
+
+/************************************************************************/
+/* Formats the device information.                                      */
+/* - Pass in pci_dev* pointer to the device.                            */
+/* - Pass in buffer to place the data.  Danger here is the buffer must  */
+/*   be as big as the client says it is.  Should be at least 128 bytes. */
+/* Return will be the length of the string data put in the buffer.      */
+/* Format:                                                              */
+/* PCI: Bus  0, Device 26, Vendor 0x12AE  Frame  1, Card  C10  Ethernet */
+/* controller                                                           */
+/************************************************************************/
+int iSeries_Device_Information(struct pci_dev* PciDev,char* Buffer, int BufferSize)
+{
+ struct iSeries_Device_Node* DevNode = (struct iSeries_Device_Node*)PciDev->sysdata;
+ char* BufPtr = Buffer;
+ int LineLen = 0;
+ char* ClassName;
+
+ if (DevNode == NULL) {
+  LineLen = sprintf(BufPtr+LineLen, "PCI: iSeries_Device_Information DevNode is NULL");
+  return LineLen;
+ }
+
+ /* Only format when the caller guarantees enough room. */
+ if (BufferSize >= 128) {
+  LineLen = sprintf(BufPtr+LineLen,"PCI: Bus%3d, Device%3d, Vendor %04X ",
+      ISERIES_BUS(DevNode), PCI_SLOT(PciDev->devfn),PciDev->vendor);
+
+  LineLen += sprintf(BufPtr+LineLen,"Frame%3d, Card %4s ", DevNode->FrameId,DevNode->CardLocation);
+
+  /* Look the class name up once instead of calling twice. */
+  ClassName = pci_class_name(PciDev->class >> 8);
+  if (ClassName == 0) {
+   LineLen += sprintf(BufPtr+LineLen,"0x%04X ",(int)(PciDev->class >> 8));
+  }
+  else {
+   LineLen += sprintf(BufPtr+LineLen,"%s",ClassName);
+  }
+ }
+ return LineLen;
+}
+/************************************************************************/
+/* Build a character string of the device location, Frame 1, Card C10,  */
+/* into BufPtr.  Returns the number of characters written.              */
+/************************************************************************/
+int device_Location(struct pci_dev* PciDev,char* BufPtr)
+{
+ struct iSeries_Device_Node* Node = (struct iSeries_Device_Node*)PciDev->sysdata;
+ int Len;
+ Len = sprintf(BufPtr,"PCI: Bus%3d, Device%3d, Vendor %04X, Location %s",
+               Node->DsaAddr.busNumber,
+               Node->AgentId,
+               Node->Vendor,
+               Node->Location);
+ return Len;
+}
+
+/*****************************************************************/
+/* Parse the Slot Area                                           */
+/* Walks the slot-map records looking for the entry that matches */
+/* this device's AgentId (and PhbId), and copies out the card    */
+/* location when found.                                          */
+/*****************************************************************/
+void iSeries_Parse_SlotArea(SlotMap* MapPtr,int MapLen, struct iSeries_Device_Node* DevNode)
+{
+ SlotMap* Entry = MapPtr;
+ int BytesLeft;
+ /* Scan slot records until we find the one for this device. */
+ for (BytesLeft = MapLen; BytesLeft > 0; BytesLeft -= SLOT_ENTRY_SIZE) {
+  if (Entry->AgentId == DevNode->AgentId ) {
+   /* No PHB known yet: adopt the first matching entry's PHB. */
+   if (DevNode->PhbId == 0xff) {
+    DevNode->PhbId = Entry->PhbId;
+   }
+   if (Entry->PhbId == DevNode->PhbId ) {
+    /* Found it: copy the 3-byte location and NUL-terminate. */
+    memcpy(&DevNode->CardLocation,&Entry->CardLocation,3);
+    DevNode->CardLocation[3] = 0;
+    break;
+   }
+  }
+  /* Advance by the on-wire record size, not sizeof(SlotMap). */
+  Entry = (SlotMap*)((char*)Entry + SLOT_ENTRY_SIZE);
+ }
+}
+
+/*****************************************************************/
+/* Parse the Mfg Area                                            */
+/* Walks the tagged Mfg VPD entries, picking out the frame id    */
+/* ("FI"), the slot-map format ("MF") and the slot map ("SM"),   */
+/* and hands the slot map to iSeries_Parse_SlotArea.             */
+/*****************************************************************/
+static void iSeries_Parse_MfgArea(u8* AreaData,int AreaLen, struct iSeries_Device_Node* DevNode)
+{
+ MfgArea* MfgAreaPtr = (MfgArea*)AreaData;
+ int MfgAreaLen = AreaLen;
+ u16 SlotMapFmt = 0;
+
+ /*************************************************************/
+ /* Parse Mfg Data                                            */
+ /*************************************************************/
+ while (MfgAreaLen > 0) {
+  int MfgTagLen = MfgAreaPtr->TagLength;
+  /*******************************************************/
+  /* Frame ID           (FI 4649020310 )                 */
+  /*******************************************************/
+  if (MfgAreaPtr->Tag == VpdFruFrameId) {  /* FI  */
+   DevNode->FrameId = MfgAreaPtr->AreaData1;
+  }
+  /*******************************************************/
+  /* Slot Map Format    (MF 4D46020004 )                 */
+  /*******************************************************/
+  else if (MfgAreaPtr->Tag == VpdSlotMapFormat){ /* MF  */
+   SlotMapFmt = (MfgAreaPtr->AreaData1*256)+(MfgAreaPtr->AreaData2);
+  }
+  /*******************************************************/
+  /* Slot Map           (SM 534D90                       */
+  /*******************************************************/
+  else if (MfgAreaPtr->Tag == VpdSlotMap){  /* SM  */
+   SlotMap* SlotMapPtr;
+   /* Format 0x1004 carries one extra pad byte before the map. */
+   if (SlotMapFmt == 0x1004) SlotMapPtr = (SlotMap*)((char*)MfgAreaPtr+MFG_ENTRY_SIZE+1);
+   else                      SlotMapPtr = (SlotMap*)((char*)MfgAreaPtr+MFG_ENTRY_SIZE);
+   iSeries_Parse_SlotArea(SlotMapPtr,MfgTagLen, DevNode);
+  }
+  /*********************************************************/
+  /* Point to the next Mfg Area                            */
+  /* Use defined size, sizeof gives the wrong answer       */
+  /* (struct padding would over-advance the pointer).      */
+  /*********************************************************/
+  MfgAreaPtr = (MfgArea*)((char*)MfgAreaPtr + MfgTagLen + MFG_ENTRY_SIZE);
+  MfgAreaLen -= (MfgTagLen + MFG_ENTRY_SIZE);
+ }
+}
+
+/*****************************************************************/
+/* Look for "BUS".. Data is not Null terminated.                 */
+/* Returns 0xFF when the PHB is not found in the VPD Data.       */
+/*****************************************************************/
+static int iSeries_Parse_PhbId(u8* AreaPtr,int AreaLength)
+{
+ u8* PhbPtr = AreaPtr;
+ int DataLen = AreaLength;
+ /* u8, not char: this tree builds with -fsigned-char, so a plain */
+ /* char 0xFF would promote to -1 on return instead of 255, and   */
+ /* callers compare the result against 0xff.                      */
+ u8 PhbId = 0xFF;
+ /* Need 3 bytes for "BUS": stop before reading past the area.    */
+ while (DataLen > 2) {
+  if (*PhbPtr == 'B' && *(PhbPtr+1) == 'U' && *(PhbPtr+2) == 'S') {
+   PhbPtr += 3;
+   while(*PhbPtr == ' ') ++PhbPtr;
+   /* PHB number is the low nibble of the digit after "BUS". */
+   PhbId = (*PhbPtr & 0x0F);
+   break;
+  }
+  ++PhbPtr;
+  --DataLen;
+ }
+ return PhbId;
+}
+
+/****************************************************************/
+/* Parse out the VPD Areas                                      */
+/* Each area is [tag byte][len lo][len hi][data...]; walk them  */
+/* until the End-Of-Area tag, routing the Id String area to the */
+/* PHB parser and the Vendor area to the Mfg parser.            */
+/****************************************************************/
+static void iSeries_Parse_Vpd(u8* VpdData, int VpdDataLen, struct iSeries_Device_Node* DevNode)
+{
+ u8* TagPtr = VpdData;
+ int DataLen = VpdDataLen-3;
+ /*************************************************************/
+ /* Parse the Areas                                           */
+ /* NOTE(review): DataLen is reduced only by AreaLen, not the */
+ /* 3-byte tag header — confirm this over-generous bound is   */
+ /* intentional (the EndOfArea tag normally stops the loop).  */
+ /*************************************************************/
+ while (*TagPtr != VpdEndOfAreaTag && DataLen > 0) {
+  int AreaLen = *(TagPtr+1) + (*(TagPtr+2)*256);
+  u8* AreaData  = TagPtr+3;
+
+  if (*TagPtr == VpdIdStringTag) {
+   DevNode->PhbId = iSeries_Parse_PhbId(AreaData,AreaLen);
+  }
+  else if (*TagPtr == VpdVendorAreaTag) {
+   iSeries_Parse_MfgArea(AreaData,AreaLen,DevNode);
+  }
+  /*********************************************************
+   * Point to next Area.
+   *********************************************************/
+  TagPtr  = AreaData + AreaLen;
+  DataLen -= AreaLen;
+ }
+}
+
+/****************************************************************
+ * iSeries_Get_Location_Code(struct iSeries_Device_Node*)
+ *
+ * Fetches the bus VPD from the hypervisor, parses out the frame
+ * id and card location, and formats DevNode->Location.  Logs and
+ * returns quietly on any failure.
+ ****************************************************************/
+void iSeries_Get_Location_Code(struct iSeries_Device_Node* DevNode)
+{
+ int BusVpdLen = 0;
+ u8* BusVpdPtr = (u8*)kmalloc(BUS_VPDSIZE, GFP_KERNEL);
+ if (BusVpdPtr == NULL) {
+  printk("PCI: Bus VPD Buffer allocation failure.\n");
+  return;
+ }
+ BusVpdLen = HvCallPci_getBusVpd(ISERIES_BUS(DevNode),REALADDR(BusVpdPtr),BUS_VPDSIZE);
+ if (BusVpdLen == 0) {
+  kfree(BusVpdPtr);
+  printk("PCI: Bus VPD Buffer zero length.\n");
+  return;
+ }
+ /*************************************************************/
+ /* Sanity check: bus VPD must open with the Id String tag.   */
+ /*************************************************************/
+ if (*BusVpdPtr != VpdIdStringTag) { /*0x82 */
+  printk("PCI: Bus VPD Buffer missing starting tag.\n");
+  kfree(BusVpdPtr);
+  return;
+ }
+ iSeries_Parse_Vpd(BusVpdPtr,BusVpdLen, DevNode);
+ sprintf(DevNode->Location,"Frame%3d, Card %-4s",DevNode->FrameId,DevNode->CardLocation);
+ kfree(BusVpdPtr);
+}
--- /dev/null
+/************************************************************************/
+/* This module supports the iSeries PCI bus interrupt handling */
+/* Copyright (C) 20yy <Robert L Holtorf> <IBM Corp> */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created, December 13, 2000 by Wayne Holm */
+/* End Change Activity */
+/************************************************************************/
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/blk.h>
+#include <linux/ide.h>
+
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <asm/ppcdebug.h>
+
+#include <asm/iSeries/HvCallPci.h>
+#include <asm/iSeries/HvCallXm.h>
+#include <asm/iSeries/iSeries_irq.h>
+#include <asm/iSeries/XmPciLpEvent.h>
+
+
+/* The iSeries PCI interrupt controller: startup/shutdown/enable/   */
+/* disable route through the hypervisor FISR calls below.  ack is   */
+/* NULL and end is a no-op because EOI is handled by XmPciLpEvent.c.*/
+hw_irq_controller iSeries_IRQ_handler = {
+ "iSeries irq controller",
+ iSeries_startup_IRQ,  /* startup */
+ iSeries_shutdown_IRQ,  /* shutdown */
+ iSeries_enable_IRQ,  /* enable */
+ iSeries_disable_IRQ,  /* disable */
+ NULL,    /* ack  */
+ iSeries_end_IRQ,  /* end  */
+ NULL    /* set_affinity */
+};
+
+
+/* One bus/subBus/deviceId triple (packed into dsa) sharing an irq.  */
+struct iSeries_irqEntry {
+ u32 dsa;
+ struct iSeries_irqEntry* next;
+};
+
+/* Per-irq anchor: validity flag plus a singly linked list of all    */
+/* device entries assigned to this irq number.                       */
+struct iSeries_irqAnchor {
+ u8 valid : 1;
+ u8 reserved : 7;
+ u16 entryCount;
+ struct iSeries_irqEntry* head;
+};
+
+/* One anchor per possible irq; populated during the bus walk.       */
+struct iSeries_irqAnchor iSeries_irqMap[NR_IRQS];
+
+void iSeries_init_irqMap(int irq);
+
+/* This is called by init_IRQ.  set in ppc_md.init_IRQ by iSeries_setup.c */
+void __init iSeries_init_IRQ(void)
+{
+ int irq;
+ /* Point every descriptor at our controller, start disabled. */
+ for (irq = 0; irq < NR_IRQS; irq++) {
+  irq_desc[irq].handler = &iSeries_IRQ_handler;
+  irq_desc[irq].status = IRQ_DISABLED;
+  irq_desc[irq].depth = 1;
+  iSeries_init_irqMap(irq);
+ }
+ /* Register PCI event handler and open an event path */
+ PPCDBG(PPCDBG_BUSWALK,"Register PCI event handler and open an event path\n");
+ XmPciLpEvent_init();
+}
+
+/**********************************************************************
+ * Called by iSeries_init_IRQ
+ * Prevent IRQs 0 and 255 from being used.  IRQ 0 appears in
+ * uninitialized devices.  IRQ 255 appears in the PCI interrupt
+ * line register if a PCI error occurs.
+ *********************************************************************/
+void __init iSeries_init_irqMap(int irq)
+{
+ if (irq == 0 || irq == 255) {
+  iSeries_irqMap[irq].valid = 0;
+ } else {
+  iSeries_irqMap[irq].valid = 1;
+ }
+ iSeries_irqMap[irq].entryCount = 0;
+ iSeries_irqMap[irq].head = NULL;
+}
+
+/* This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot */
+/* It calculates the irq value for the slot.                                   */
+int __init iSeries_allocate_IRQ(HvBusNumber busNumber, HvSubBusNumber subBusNumber, HvAgentId deviceId)
+{
+ u8 idsel = (deviceId >> 4);
+ u8 function = deviceId & 0x0F;
+ /* Flatten bus/idsel/function into a slot index, then spread the */
+ /* indices over the usable irq range 1..254.                     */
+ int slot = (busNumber-1)*16 + (idsel-1)*8 + function;
+ return ((slot*9/8) % 254) + 1;
+}
+
+/* This is called out of iSeries_scan_slot to assign the EADS slot to its IRQ number */
+/* Returns 0 on success, -ENOMEM if the entry cannot be allocated, and -1 for a      */
+/* bad irq or an invalid map slot.                                                   */
+/* NOTE(review): error returns mix -1 and -ENOMEM; callers presumably only test      */
+/* for non-zero — confirm before unifying.                                           */
+int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber, HvSubBusNumber subBusNumber, HvAgentId deviceId)
+{
+ int rc;
+ u32 dsa = (busNumber << 16) | (subBusNumber << 8) | deviceId;
+ struct iSeries_irqEntry* newEntry;
+ unsigned long flags;
+
+ if (irq < 0 || irq >= NR_IRQS) {
+  return -1;
+ }
+ newEntry = kmalloc(sizeof(*newEntry), GFP_KERNEL);
+ if (newEntry == NULL) {
+  return -ENOMEM;
+ }
+ newEntry->dsa  = dsa;
+ newEntry->next = NULL;
+ /********************************************************************
+ * Probably not necessary to lock the irq since allocation is only
+ * done during buswalk, but it should not hurt anything except a
+ * little performance to be smp safe.
+ *******************************************************************/
+ spin_lock_irqsave(&irq_desc[irq].lock, flags);
+
+ if (iSeries_irqMap[irq].valid) {
+  /* Push the new element onto the irq stack */
+  newEntry->next = iSeries_irqMap[irq].head;
+  iSeries_irqMap[irq].head = newEntry;
+  ++iSeries_irqMap[irq].entryCount;
+  rc = 0;
+  PPCDBG(PPCDBG_BUSWALK,"iSeries_assign_IRQ 0x%04X.%02X.%02X = 0x%04X\n",busNumber, subBusNumber, deviceId, irq);
+ }
+ else {
+  printk("PCI: Something is wrong with the iSeries_irqMap.  \n");
+  kfree(newEntry);
+  rc = -1;
+ }
+ spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ return rc;
+}
+
+
+/* This is called by iSeries_activate_IRQs: for every device entry   */
+/* assigned to this irq, tell the bridge its irq number and unmask   */
+/* the function's bits in the FISR.                                  */
+unsigned int iSeries_startup_IRQ(unsigned int irq)
+{
+ struct iSeries_irqEntry* cur;
+ u32 bus, subBus, deviceId, function, mask;
+ for (cur = iSeries_irqMap[irq].head; cur != NULL; cur = cur->next) {
+  bus      = (cur->dsa >> 16) & 0xFFFF;
+  subBus   = (cur->dsa >> 8) & 0xFF;
+  deviceId = cur->dsa & 0xFF;
+  function = deviceId & 0x0F;
+  /* Link the IRQ number to the bridge */
+  HvCallXm_connectBusUnit(bus, subBus, deviceId, irq);
+  /* Unmask bridge interrupts in the FISR */
+  mask = 0x01010000 << function;
+  HvCallPci_unmaskFisr(bus, subBus, deviceId, mask);
+  PPCDBG(PPCDBG_BUSWALK,"iSeries_activate_IRQ 0x%02X.%02X.%02X  Irq:0x%02X\n",bus,subBus,deviceId,irq);
+ }
+ return 0;
+}
+
+/* This is called out of iSeries_fixup to activate interrupt
+ * generation for usable slots: run every irq's startup handler
+ * under its descriptor lock. */
+void __init iSeries_activate_IRQs(void)
+{
+ /* '(void)' rather than '()': an empty list declares an            */
+ /* unprototyped function in C and defeats argument checking.       */
+ int irq;
+ unsigned long flags;
+ for (irq=0; irq < NR_IRQS; irq++)  {
+  spin_lock_irqsave(&irq_desc[irq].lock, flags);
+  irq_desc[irq].handler->startup(irq);
+  spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ }
+}
+
+/* this is not called anywhere currently */
+/* Undo startup: detach the irq from each bridge and mask the        */
+/* function's FISR bits.  The caller must hold the irq lock.         */
+void iSeries_shutdown_IRQ(unsigned int irq) {
+ struct iSeries_irqEntry* cur;
+ u32 bus, subBus, deviceId, function, mask;
+
+ for (cur = iSeries_irqMap[irq].head; cur; cur = cur->next) {
+  bus      = (cur->dsa >> 16) & 0xFFFF;
+  subBus   = (cur->dsa >> 8) & 0xFF;
+  deviceId = cur->dsa & 0xFF;
+  function = deviceId & 0x0F;
+  /* Invalidate the IRQ number in the bridge */
+  HvCallXm_connectBusUnit(bus, subBus, deviceId, 0);
+  /* Mask bridge interrupts in the FISR */
+  mask = 0x01010000 << function;
+  HvCallPci_maskFisr(bus, subBus, deviceId, mask);
+ }
+}
+
+/***********************************************************
+ * This will be called by device drivers (via disable_IRQ)
+ * to disable INTA in the bridge interrupt status register.
+ ***********************************************************/
+void iSeries_disable_IRQ(unsigned int irq)
+{
+ struct iSeries_irqEntry* cur;
+ u32 bus, subBus, deviceId, mask;
+
+ /* The IRQ has already been locked by the caller */
+ for (cur = iSeries_irqMap[irq].head; cur; cur = cur->next) {
+  bus      = (cur->dsa >> 16) & 0xFFFF;
+  subBus   = (cur->dsa >> 8) & 0xFF;
+  deviceId = cur->dsa & 0xFF;
+  /* Mask secondary INTA */
+  mask = 0x80000000;
+  HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
+  PPCDBG(PPCDBG_BUSWALK,"iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",bus,subBus,deviceId,irq);
+ }
+}
+
+/***********************************************************
+ * This will be called by device drivers (via enable_IRQ)
+ * to enable INTA in the bridge interrupt status register.
+ ***********************************************************/
+void iSeries_enable_IRQ(unsigned int irq)
+{
+ struct iSeries_irqEntry* cur;
+ u32 bus, subBus, deviceId, mask;
+
+ /* The IRQ has already been locked by the caller */
+ for (cur = iSeries_irqMap[irq].head; cur; cur = cur->next) {
+  bus      = (cur->dsa >> 16) & 0xFFFF;
+  subBus   = (cur->dsa >> 8) & 0xFF;
+  deviceId = cur->dsa & 0xFF;
+  /* Unmask secondary INTA */
+  mask = 0x80000000;
+  HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
+  PPCDBG(PPCDBG_BUSWALK,"iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",bus,subBus,deviceId,irq);
+ }
+}
+
+/* Need to define this so ppc_irq_dispatch_handler will NOT call
+   enable_IRQ at the end of interrupt handling.  However, this
+   does nothing because there is not enough information provided
+   to do the EOI HvCall.  This is done by XmPciLpEvent.c */
+void iSeries_end_IRQ(unsigned int irq)
+{
+ /* Intentionally empty: EOI is issued from XmPciLpEvent.c. */
+}
+
--- /dev/null
+/*
+ * iSeries_pci.c
+ *
+ * Copyright (C) 2001 Allan Trautman, IBM Corporation
+ *
+ * iSeries specific routines for PCI.
+ *
+ * Based on code from pci.c and iSeries_pci.c 32bit
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/ide.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppcdebug.h>
+#include <asm/Naca.h>
+#include <asm/flight_recorder.h>
+
+#include <asm/iSeries/HvCallPci.h>
+#include <asm/iSeries/HvCallSm.h>
+#include <asm/iSeries/HvCallXm.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/iSeries/iSeries_dma.h>
+#include <asm/iSeries/iSeries_irq.h>
+#include <asm/iSeries/iSeries_pci.h>
+#include <asm/iSeries/mf.h>
+
+#include "iSeries_IoMmTable.h"
+#include "pci.h"
+
+extern struct pci_controller* hose_head;
+extern struct pci_controller** hose_tail;
+extern int global_phb_number;
+extern int panic_timeout;
+
+extern struct Naca *naca;
+extern struct device_node *allnodes;
+extern unsigned long phb_tce_table_init(struct pci_controller *phb);
+extern unsigned long iSeries_Base_Io_Memory;
+
+extern struct pci_ops iSeries_pci_ops;
+extern struct flightRecorder* PciFr;
+extern struct TceTable* tceTables[256];
+
+/*******************************************************************
+ * Counters and control flags.
+ *******************************************************************/
+extern long Pci_Io_Read_Count;
+extern long Pci_Io_Write_Count;
+extern long Pci_Cfg_Read_Count;
+extern long Pci_Cfg_Write_Count;
+extern long Pci_Error_Count;
+
+extern int Pci_Retry_Max;
+extern int Pci_Error_Flag;
+extern int Pci_Trace_Flag;
+
+extern void iSeries_MmIoTest(void);
+
+
+/*******************************************************************
+ * Forward declares of prototypes.
+ *******************************************************************/
+struct iSeries_Device_Node* find_Device_Node(struct pci_dev* PciDev);
+struct iSeries_Device_Node* get_Device_Node(struct pci_dev* PciDev);
+
+unsigned long find_and_init_phbs(void);
+void fixup_resources(struct pci_dev *dev);
+void iSeries_pcibios_fixup(void);
+struct pci_controller* alloc_phb(struct device_node *dev, char *model, unsigned int addr_size_words) ;
+
+void iSeries_Scan_PHBs_Slots(struct pci_controller* Phb);
+void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
+int iSeries_Scan_Bridge_Slot(HvBusNumber Bus, HvSubBusNumber SubBus, int MaxAgents);
+void list_device_nodes(void);
+
+struct pci_dev;
+
+LIST_HEAD(Global_Device_List);
+
+int DeviceCount = 0;
+
+/**********************************************************************************
+ * Log error info in the Flight Recorder and to the system console.
+ * Filters out the device-not-there errors (HvRc 0x0302).
+ * PCI: EADs Connect Failed: 0x18.58.10 Rc: 0x00xx
+ * PCI: Read Vendor Failed: 0x18.58.10 Rc: 0x00xx
+ * PCI: Connect Bus Unit Failed: 0x18.58.10 Rc: 0x00xx
+ **********************************************************************************/
+void pci_Log_Error(char* Error_Text, int Bus, int SubBus, int AgentId, int HvRc)
+{
+ char ErrorString[128];
+ /* 0x0302 = device not present: a routine probe miss, not worth logging. */
+ if( HvRc == 0x0302) return;
+ sprintf(ErrorString,"%s Failed:  0x%02X.%02X.%02X  Rc: 0x%04X",Error_Text,Bus,SubBus,AgentId,HvRc);
+ /* Never pass a composed buffer as the format string: a '%' in  */
+ /* Error_Text would be misinterpreted as a conversion.          */
+ PCIFR("%s", ErrorString);
+ printk("PCI: %s\n",ErrorString);
+}
+
+/**********************************************************************************
+ * Dump the iSeries Temp Device Node to the debug console.  Sample output:
+ *<4>buswalk [swapper : - DeviceNode: 0xC000000000634300
+ *<4>00. Device Node   = 0xC000000000634300
+ *<4>     - PciDev     = 0x0000000000000000
+ *<4>     - tDevice    = 0x 17:01.00  0x1022 00
+ *<4> 4. Device Node = 0xC000000000634480
+ *<4>     - PciDev   = 0x0000000000000000
+ *<4>     - Device   = 0x 18:38.16  Irq:0xA7  Vendor:0x1014  Flags:0x00
+ *<4>     - Devfn    = 0xB0: 22.18
+ **********************************************************************************/
+void dumpDevice_Node(struct iSeries_Device_Node* DevNode)
+{
+ udbg_printf("Device Node      = 0x%p\n",DevNode);
+ udbg_printf("     - PciDev    = 0x%p\n",DevNode->PciDev);
+ udbg_printf("     - Device    = 0x%4X:%02X.%02X (0x%02X)\n",
+      ISERIES_BUS(DevNode),
+      ISERIES_SUBBUS(DevNode),
+      DevNode->AgentId,
+      DevNode->DevFn);
+ /* Only the high word of the DSA is of interest here. */
+ udbg_printf("     - DSA       = 0x%04X\n",ISERIES_DSA(DevNode)>>32 );
+
+ udbg_printf("     = Irq:0x%02X Vendor:0x%04X  Flags:0x%02X\n",
+      DevNode->Irq,
+      DevNode->Vendor,
+      DevNode->Flags );
+ udbg_printf("     - Location  = %s\n",DevNode->CardLocation);
+}
+/**********************************************************************************
+ * Walk the global device node list, dumping each node to the debug console.
+ **********************************************************************************/
+void list_device_nodes(void)
+{
+ struct list_head* Entry;
+ for (Entry = Global_Device_List.next; Entry != &Global_Device_List; Entry = Entry->next) {
+ dumpDevice_Node((struct iSeries_Device_Node*)Entry);
+ }
+}
+
+
+/***********************************************************************
+ * build_device_node(Bus, SubBus, AgentId, Function)
+ *
+ * Allocates and zero-fills a new iSeries_Device_Node, appends it to
+ * Global_Device_List, and fills in its bus/subbus/agent addressing.
+ * Returns the new node, or NULL if the allocation fails.
+ ***********************************************************************/
+struct iSeries_Device_Node* build_device_node(HvBusNumber Bus, HvSubBusNumber SubBus, int AgentId, int Function)
+{
+ struct iSeries_Device_Node* DeviceNode;
+
+ PPCDBG(PPCDBG_BUSWALK,"- "__FUNCTION__" 0x%02X.%02X.%02X Function: %02X\n",Bus,SubBus,AgentId, Function);
+
+ DeviceNode = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
+ if(DeviceNode == NULL) return NULL;
+
+ memset(DeviceNode,0,sizeof(struct iSeries_Device_Node) );
+ /* Node is linked in before the fields below are filled — assumes the
+ * bus walk is single-threaded at this point; TODO confirm. */
+ list_add_tail(&DeviceNode->Device_List,&Global_Device_List);
+ /*DeviceNode->DsaAddr = ((u64)Bus<<48)+((u64)SubBus<<40)+((u64)0x10<<32); */
+ ISERIES_BUS(DeviceNode) = Bus;
+ ISERIES_SUBBUS(DeviceNode) = SubBus;
+ /* 0x10 is the deviceId used for all config-space HV calls in this file. */
+ DeviceNode->DsaAddr.deviceId = 0x10;
+ DeviceNode->DsaAddr.barNumber = 0;
+ DeviceNode->AgentId = AgentId;
+ DeviceNode->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId),Function );
+ DeviceNode->IoRetry = 0;
+ iSeries_Get_Location_Code(DeviceNode);
+ PCIFR("Device 0x%02X.%2X, Node:0x%p ",ISERIES_BUS(DeviceNode),ISERIES_DEVFUN(DeviceNode),DeviceNode);
+ return DeviceNode;
+}
+/****************************************************************************
+*
+* Allocate a pci_controller (PHB) and initialize its common fields;
+* the new hose is appended to the global hose list via hose_tail.
+* Returns NULL on allocation failure.
+*
+*****************************************************************************/
+struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types controller_type)
+{
+ struct pci_controller *hose;
+ hose = (struct pci_controller*)kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
+ if(hose == NULL) return NULL;
+
+ memset(hose, 0, sizeof(struct pci_controller));
+ /* The memset above zero-filled 'what', so the 7-byte copy below stays
+ * NUL-terminated (assumes hose->what holds at least 8 bytes — TODO confirm). */
+ if(strlen(model) < 8) strcpy(hose->what,model);
+ else memcpy(hose->what,model,7);
+ hose->type = controller_type;
+ hose->global_number = global_phb_number;
+ global_phb_number++;
+
+ /* Append to the singly linked hose list. */
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+ return hose;
+}
+
+/****************************************************************************
+ *
+ * unsigned long __init find_and_init_phbs(void)
+ *
+ * Description:
+ * This function checks for all possible system PCI host bridges that connect
+ * PCI buses. The system hypervisor is queried as to the guest partition
+ * ownership status. A pci_controller is built for any bus which is partially
+ * owned or fully owned by this guest partition.
+ *
+ * Returns 0 on success; -1 if a pci_controller allocation fails.
+ * NOTE(review): the return type is unsigned long, so the -1 comes back
+ * as ~0UL to the caller.
+ ****************************************************************************/
+unsigned long __init find_and_init_phbs(void)
+{
+ struct pci_controller* phb;
+ HvBusNumber BusNumber;
+
+ PPCDBG(PPCDBG_BUSWALK,__FUNCTION__" Entry\n");
+
+ /* Check all possible buses. */
+ for (BusNumber = 0; BusNumber < 256; BusNumber++) {
+ /* 0 => bus exists and is (at least partially) owned by this partition. */
+ int RtnCode = HvCallXm_testBus(BusNumber);
+ if (RtnCode == 0) {
+ phb = pci_alloc_pci_controllerX("PHB HV", phb_type_hypervisor);
+ if(phb == NULL) {
+ printk("PCI: Allocate pci_controller failed.\n");
+ PCIFR( "Allocate pci_controller failed.");
+ return -1;
+ }
+ /* One hose per hypervisor bus; the bus number doubles as its id. */
+ phb->pci_mem_offset = phb->local_number = BusNumber;
+ phb->first_busno = BusNumber;
+ phb->last_busno = BusNumber;
+ phb->ops = &iSeries_pci_ops;
+
+ PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",phb,BusNumber);
+ PCIFR("Create iSeries PHB controller: %04X",BusNumber);
+
+ /***************************************************/
+ /* Find and connect the devices. */
+ /***************************************************/
+ iSeries_Scan_PHBs_Slots(phb);
+ }
+ /* Check for Unexpected Return code, a clue that something */
+ /* has gone wrong. 0x0301 is presumably "bus not there" — TODO confirm. */
+ else if(RtnCode != 0x0301) {
+ PCIFR("Unexpected Return on Probe(0x%04X): 0x%04X",BusNumber,RtnCode);
+ }
+
+ }
+ return 0;
+}
+/***********************************************************************
+ * iSeries_pcibios_init
+ *
+ * Chance to initialize structures or variables before the PCI bus walk:
+ * sets up the I/O MM translation table, discovers the PHBs, and builds
+ * a TCE table per discovered bus.
+ *
+ *<4>buswalk [swapper : iSeries_pcibios_init Entry.
+ *<4>buswalk [swapper : IoMmTable Initialized 0xC00000000034BD30
+ *<4>buswalk [swapper : find_and_init_phbs Entry
+ *<4>buswalk [swapper : Create iSeries pci_controller:(0xC00000001F5C7000), Bus 0x0017
+ *<4>buswalk [swapper : Connect EADs: 0x17.00.12 = 0x00
+ *<4>buswalk [swapper : iSeries_assign_IRQ 0x0017.00.12 = 0x0091
+ *<4>buswalk [swapper : - allocate and assign IRQ 0x17.00.12 = 0x91
+ *<4>buswalk [swapper : - FoundDevice: 0x17.28.10 = 0x12AE
+ *<4>buswalk [swapper : - build_device_node 0x17.28.12
+ *<4>buswalk [swapper : iSeries_pcibios_init Exit.
+ ***********************************************************************/
+void iSeries_pcibios_init(void)
+{
+ struct pci_controller *phb;
+ PPCDBG(PPCDBG_BUSWALK,__FUNCTION__" Entry.\n");
+
+ iSeries_IoMmTable_Initialize();
+
+ find_and_init_phbs();
+
+ /* Create the TCE Tables — one per hose on the list find_and_init_phbs built. */
+ phb = hose_head;
+ while(phb != NULL) {
+ create_pci_bus_tce_table(phb->local_number);
+ PCIFR("Bus 0x%04X TCE Table %p",phb->local_number,tceTables[phb->local_number] );
+ phb = phb->next;
+ }
+
+
+ /* Keep the bus numbers already assigned above — presumably; TODO confirm. */
+ pci_assign_all_busses = 0;
+ PPCDBG(PPCDBG_BUSWALK,__FUNCTION__" Exit.\n");
+}
+
+/***********************************************************************
+ * iSeries_pcibios_fixup(void)
+ *
+ * Runs after the generic PCI scan: links each pci_dev to its iSeries
+ * device node (pci_dev->sysdata <-> node->PciDev), allocates the
+ * device BARs, prints a per-device summary, then activates IRQs.
+ ***********************************************************************/
+void __init iSeries_pcibios_fixup(void)
+{
+ struct pci_dev* PciDev;
+ struct iSeries_Device_Node* DeviceNode;
+ char Buffer[256];
+ /* NOTE(review): this local shadows the file-level DeviceCount counter. */
+ int DeviceCount = 0;
+
+ PPCDBG(PPCDBG_BUSWALK,__FUNCTION__" Entry.\n");
+
+ /******************************************************/
+ /* Fix up at the device node and pci_dev relationship */
+ /******************************************************/
+ pci_for_each_dev(PciDev) {
+ DeviceNode = find_Device_Node(PciDev);
+ if(DeviceNode != NULL) {
+ ++DeviceCount;
+ /* Cross-link so get_Device_Node can use the fast sysdata path. */
+ PciDev->sysdata = (void*)DeviceNode;
+ DeviceNode->PciDev = PciDev;
+
+ PPCDBG(PPCDBG_BUSWALK,"PciDev 0x%p <==> DevNode 0x%p\n",PciDev,DeviceNode );
+
+ iSeries_allocateDeviceBars(PciDev);
+
+ PPCDBGCALL(PPCDBG_BUSWALK,dumpPci_Dev(PciDev) );
+
+ iSeries_Device_Information(PciDev,Buffer, sizeof(Buffer) );
+ printk("%d. %s\n",DeviceCount,Buffer);
+
+ } else {
+ printk("PCI: Device Tree not found for 0x%016lX\n",(unsigned long)PciDev);
+ }
+ }
+ iSeries_IoMmTable_Status();
+
+ iSeries_activate_IRQs();
+
+ // This is test code.
+ //mf_displaySrc(0xC9000100);
+ //Pci_IoTest();
+ // Pci_CfgIoTest();
+ // mf_displaySrc(0xC9000500);
+ // Pci_MMIoTest();
+ //mf_displaySrc(0xC9000999);
+}
+/***********************************************************************
+ * iSeries_pcibios_fixup_bus(struct pci_bus* PciBus)
+ *
+ * Per-bus fixup hook; currently does nothing beyond tracing the
+ * bus number.
+ ***********************************************************************/
+void iSeries_pcibios_fixup_bus(struct pci_bus* PciBus)
+{
+ PPCDBG(PPCDBG_BUSWALK,__FUNCTION__"(0x%04X) Entry.\n",PciBus->number);
+
+}
+/***********************************************************************
+ * find_floppy(void)
+ *
+ * Finds the default floppy device, if the system has one, and returns
+ * the pci_dev for the isa bridge for the floppy device.
+ *
+ * Note: iSeries only has a virtual diskette, so this always returns
+ * NULL here.
+ ***********************************************************************/
+struct pci_dev* find_floppy(void)
+{
+ PPCDBG(PPCDBG_BUSWALK,"- Find Floppy pci_dev.. None on iSeries.\n");
+ return NULL;
+}
+
+
+/***********************************************************************
+ * fixup_resources(struct pci_dev *dev)
+ *
+ * Per-device resource fixup hook; a no-op on iSeries apart from the
+ * debug trace (BARs are set up in iSeries_allocateDeviceBars instead).
+ ***********************************************************************/
+void fixup_resources(struct pci_dev *PciDev)
+{
+ PPCDBG(PPCDBG_BUSWALK,__FUNCTION__" PciDev %p\n",PciDev);
+}
+
+
+/********************************************************************************
+* Loop through each node function to find usable EADs bridges.
+* Queries the hypervisor for device info on agents 1..7 of the PHB's
+* primary sub bus; every node device found is scanned for bridges.
+*********************************************************************************/
+void iSeries_Scan_PHBs_Slots(struct pci_controller* Phb)
+{
+ struct HvCallPci_DeviceInfo* DevInfo;
+ HvBusNumber Bus = Phb->local_number; /* System Bus */
+ HvSubBusNumber SubBus = 0; /* EADs is always 0. */
+ int HvRc = 0;
+ int IdSel = 1;
+ int MaxAgents = 8;
+
+ DevInfo = (struct HvCallPci_DeviceInfo*)kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
+ if(DevInfo == NULL) return;
+
+ /********************************************************************************
+ * Probe for EADs Bridges
+ ********************************************************************************/
+ for (IdSel=1; IdSel < MaxAgents; ++IdSel) {
+ HvRc = HvCallPci_getDeviceInfo(Bus, SubBus, IdSel,REALADDR(DevInfo), sizeof(struct HvCallPci_DeviceInfo));
+ if (HvRc == 0) {
+ if(DevInfo->deviceType == HvCallPci_NodeDevice) {
+ iSeries_Scan_EADs_Bridge(Bus, SubBus, IdSel);
+ }
+ /* BUGFIX: message was missing its closing parenthesis. */
+ else printk("PCI: Invalid System Configuration(0x%02X).\n",DevInfo->deviceType);
+ }
+ else pci_Log_Error("getDeviceInfo",Bus, SubBus, IdSel,HvRc);
+ }
+ kfree(DevInfo);
+}
+
+
+/********************************************************************************
+* Probe the 8 functions of one EADs IdSel: connect each bus unit, query
+* its bus-unit info, and scan any bridge device found for child devices.
+*********************************************************************************/
+void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel)
+{
+ struct HvCallPci_BridgeInfo* BridgeInfo;
+ HvAgentId AgentId;
+ int Function;
+ int HvRc;
+
+ BridgeInfo = (struct HvCallPci_BridgeInfo*)kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
+ if(BridgeInfo == NULL) return;
+
+ /*********************************************************************
+ * Note: hvSubBus and irq are always 0 at this level!
+ *********************************************************************/
+ for (Function=0; Function < 8; ++Function) {
+ AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
+ HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, 0);
+ if (HvRc == 0) {
+ /* Connect EADs: 0x18.00.12 = 0x00 */
+ PPCDBG(PPCDBG_BUSWALK,"PCI:Connect EADs: 0x%02X.%02X.%02X\n",Bus, SubBus, AgentId);
+ PCIFR( "Connect EADs: 0x%02X.%02X.%02X", Bus, SubBus, AgentId);
+ HvRc = HvCallPci_getBusUnitInfo(Bus, SubBus, AgentId,
+ REALADDR(BridgeInfo), sizeof(struct HvCallPci_BridgeInfo));
+ if (HvRc == 0) {
+ PPCDBG(PPCDBG_BUSWALK,"PCI: BridgeInfo, Type: 0x%02X, SubBus 0x%02X, MaxAgents 0x%02X\n",
+ BridgeInfo->busUnitInfo.deviceType,
+ BridgeInfo->subBusNumber,
+ BridgeInfo->maxAgents);
+
+ if (BridgeInfo->busUnitInfo.deviceType == HvCallPci_BridgeDevice) {
+ /* Scan_Bridge_Slot...: 0x18.00.12 */
+ iSeries_Scan_Bridge_Slot(Bus,BridgeInfo->subBusNumber,BridgeInfo->maxAgents);
+ }
+ else printk("PCI: Invalid Bridge Configuration(0x%02X)",BridgeInfo->busUnitInfo.deviceType);
+ }
+ }
+ /* 0x000B is presumably "no such unit" and not worth logging — TODO confirm. */
+ else if(HvRc != 0x000B) pci_Log_Error("EADs Connect",Bus,SubBus,AgentId,HvRc);
+ }
+ kfree(BridgeInfo);
+}
+
+/********************************************************************************
+* Scan one EADs bridge slot: allocate/assign the slot interrupt, then
+* connect every function of every agent under SubBus and build a device
+* node for each device whose vendor ID can be read.
+*
+* This assumes that the node slot is always on the primary bus!
+*
+* Returns the last HvCallXm/HvCallPci return code seen.
+*********************************************************************************/
+int iSeries_Scan_Bridge_Slot(HvBusNumber Bus, HvSubBusNumber SubBus, int MaxAgents)
+{
+ struct iSeries_Device_Node* DeviceNode;
+ u16 VendorId = 0;
+ int HvRc = 0;
+ int Irq = 0;
+ int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
+ int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
+ HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
+ HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
+ int FirstSlotId= 0; /* Remembered for the slot's first function; not otherwise used. */
+
+ /**********************************************************/
+ /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
+ /**********************************************************/
+ Irq = iSeries_allocate_IRQ(Bus, 0, AgentId);
+ iSeries_assign_IRQ(Irq, Bus, 0, AgentId);
+ PPCDBG(PPCDBG_BUSWALK,"PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",Bus, 0, AgentId, Irq );
+
+ /****************************************************************************
+ * Connect all functions of any device found.
+ ****************************************************************************/
+ for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) {
+ for (Function = 0; Function < 8; ++Function) {
+ AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
+ HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, Irq);
+ if( HvRc == 0) {
+ HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId, PCI_VENDOR_ID, &VendorId);
+ if( HvRc == 0) {
+ /**********************************************************/
+ /* FoundDevice: 0x18.28.10 = 0x12AE */
+ /**********************************************************/
+ HvCallPci_configStore8(Bus, SubBus, AgentId, PCI_INTERRUPT_LINE, Irq);
+ PPCDBG(PPCDBG_BUSWALK,"PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X\n",
+ Bus, SubBus, AgentId, VendorId);
+ ++DeviceCount;
+ PCIFR("Device(%4d): 0x%02X.%02X.%02X",DeviceCount,Bus, SubBus, AgentId);
+ DeviceNode = build_device_node(Bus, SubBus, EADsIdSel, Function);
+ /* BUGFIX: build_device_node returns NULL when its kmalloc
+ * fails; do not dereference it in that case. */
+ if(DeviceNode != NULL) {
+ DeviceNode->Vendor = VendorId;
+ DeviceNode->Irq = Irq;
+ }
+
+ /***********************************************************
+ * On the first device/function, assign irq to slot
+ ***********************************************************/
+ if(Function == 0) {
+ FirstSlotId = AgentId;
+ // AHT iSeries_assign_IRQ(Irq, Bus, SubBus, AgentId);
+ }
+ }
+ else pci_Log_Error("Read Vendor",Bus,SubBus,AgentId,HvRc);
+ }
+ else pci_Log_Error("Connect Bus Unit",Bus,SubBus, AgentId,HvRc);
+ } /* for (Function = 0; Function < 8; ++Function) */
+ } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
+ return HvRc;
+}
+/************************************************************************/
+/* I/O memory operations MUST use the MMIO accessors on iSeries.        */
+/* To do: for performance, invoke the hv call directly.                 */
+/************************************************************************/
+/* Fill Count bytes of I/O memory at dest with c, one MMIO byte write   */
+/* at a time.  Returns dest, matching memset's contract.                */
+void* iSeries_memset(void* dest, char c, size_t Count)
+{
+ char* Target = (char*)dest;
+ size_t Remaining;
+
+ for (Remaining = Count; Remaining > 0; --Remaining) {
+ iSeries_Write_Byte((u8)c, (void*)Target);
+ ++Target;
+ }
+ return dest;
+}
+/* Copy count bytes from normal memory into I/O space, one MMIO byte
+ * write at a time.  Returns dest. */
+void* iSeries_memcpy_toio(void *dest, void *source, size_t count)
+{
+ char* Out = (char*)dest;
+ const char* In = (const char*)source;
+ size_t Remaining;
+
+ for (Remaining = count; Remaining > 0; --Remaining) {
+ iSeries_Write_Byte(*In++, (void*)Out++);
+ }
+ return dest;
+}
+/* Copy count bytes out of I/O space into normal memory, one MMIO byte
+ * read at a time.  Returns dest. */
+void* iSeries_memcpy_fromio(void *dest, void *source, size_t count)
+{
+ char* Out = (char*)dest;
+ char* In = (char*)source;
+ size_t Remaining;
+
+ for (Remaining = count; Remaining > 0; --Remaining) {
+ *Out++ = iSeries_Read_Byte((void*)In++);
+ }
+ return dest;
+}
+/**********************************************************************************
+ * Scan the global device list for the node matching the pci_dev's bus
+ * number and devfn.  Returns NULL when no node matches.
+ **********************************************************************************/
+struct iSeries_Device_Node* find_Device_Node(struct pci_dev* PciDev)
+{
+ int BusNumber = PciDev->bus->number;
+ int DevFn = PciDev->devfn;
+ struct list_head* Entry;
+
+ for (Entry = Global_Device_List.next; Entry != &Global_Device_List; Entry = Entry->next) {
+ struct iSeries_Device_Node* Node = (struct iSeries_Device_Node*)Entry;
+ if (BusNumber == ISERIES_BUS(Node) && DevFn == Node->DevFn) {
+ return Node;
+ }
+ }
+ return NULL;
+}
+/******************************************************************/
+/* Returns the device node for the passed pci_dev.                */
+/* Uses the cached sysdata pointer when it is set and still       */
+/* points back at this pci_dev; otherwise falls back to a scan    */
+/* of the global list.  May return NULL; callers must handle it.  */
+/******************************************************************/
+struct iSeries_Device_Node* get_Device_Node(struct pci_dev* PciDev)
+{
+ struct iSeries_Device_Node* Node = (struct iSeries_Device_Node*)PciDev->sysdata;
+
+ if (Node == NULL || Node->PciDev != PciDev) {
+ Node = find_Device_Node(PciDev);
+ }
+ return Node;
+}
+/**********************************************************************************
+ *
+ * Read PCI Config Space Code
+ *
+ **********************************************************************************/
+/** BYTE *************************************************************************/
+/* Read one byte of DevNode's config space at Offset into *ReadValue.
+ * Returns the HV return code: 0 on success, 0x301 for a NULL node. */
+int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8* ReadValue)
+{
+ u8 Data;
+
+ if (DevNode == NULL) {
+ return 0x301;
+ }
+ ++Pci_Cfg_Read_Count;
+ DevNode->ReturnCode = HvCallPci_configLoad8(ISERIES_BUS(DevNode), ISERIES_SUBBUS(DevNode), 0x10, Offset, &Data);
+ if (Pci_Trace_Flag == 1) {
+ PCIFR("RCB: 0x%04X.%02X 0x%04X = 0x%02X", ISERIES_BUS(DevNode), DevNode->DevFn, Offset, Data);
+ }
+ if (DevNode->ReturnCode != 0) {
+ printk("PCI: RCB: 0x%04X.%02X Error: 0x%04X\n", ISERIES_BUS(DevNode), DevNode->DevFn, DevNode->ReturnCode);
+ PCIFR("RCB: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode), DevNode->DevFn, DevNode->ReturnCode);
+ }
+ *ReadValue = Data;
+ return DevNode->ReturnCode;
+}
+/** WORD *************************************************************************/
+/* Read a 16-bit value from DevNode's config space at Offset into *ReadValue.
+ * Returns the HV return code: 0 on success, 0x301 for a NULL node. */
+int iSeries_Node_read_config_word(struct iSeries_Device_Node* DevNode, int Offset, u16* ReadValue)
+{
+ u16 ReadData;
+ if(DevNode == NULL) { return 0x301; }
+ ++Pci_Cfg_Read_Count;
+ DevNode->ReturnCode = HvCallPci_configLoad16(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
+ Offset,&ReadData);
+ if(Pci_Trace_Flag == 1) {
+ PCIFR("RCW: 0x%04X.%02X 0x%04X = 0x%04X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,ReadData);
+ }
+ if(DevNode->ReturnCode != 0 ) {
+ printk("PCI: RCW: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ PCIFR( "RCW: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+
+ }
+ *ReadValue = ReadData;
+ return DevNode->ReturnCode;
+}
+/** DWORD *************************************************************************/
+/* Read a 32-bit value from DevNode's config space at Offset into *ReadValue.
+ * Returns the HV return code: 0 on success, 0x301 for a NULL node. */
+int iSeries_Node_read_config_dword(struct iSeries_Device_Node* DevNode, int Offset, u32* ReadValue)
+{
+ u32 ReadData;
+ if(DevNode == NULL) { return 0x301; }
+ ++Pci_Cfg_Read_Count;
+ DevNode->ReturnCode = HvCallPci_configLoad32(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
+ Offset,&ReadData);
+ if(Pci_Trace_Flag == 1) {
+ PCIFR("RCL: 0x%04X.%02X 0x%04X = 0x%08X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,ReadData);
+ }
+ if(DevNode->ReturnCode != 0 ) {
+ printk("PCI: RCL: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ PCIFR( "RCL: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ }
+ *ReadValue = ReadData;
+ return DevNode->ReturnCode;
+}
+/* pci_ops shim: byte config read routed through the device node for PciDev. */
+int iSeries_pci_read_config_byte(struct pci_dev* PciDev, int Offset, u8* ReadValue)
+{
+ struct iSeries_Device_Node* Node = get_Device_Node(PciDev);
+ return (Node == NULL) ? 0x0301 : iSeries_Node_read_config_byte(Node, Offset, ReadValue);
+}
+/* pci_ops shim: word config read routed through the device node for PciDev. */
+int iSeries_pci_read_config_word(struct pci_dev* PciDev, int Offset, u16* ReadValue)
+{
+ struct iSeries_Device_Node* Node = get_Device_Node(PciDev);
+ return (Node == NULL) ? 0x0301 : iSeries_Node_read_config_word(Node, Offset, ReadValue);
+}
+/* pci_ops shim: dword config read routed through the device node for PciDev. */
+int iSeries_pci_read_config_dword(struct pci_dev* PciDev, int Offset, u32* ReadValue)
+{
+ struct iSeries_Device_Node* Node = get_Device_Node(PciDev);
+ return (Node == NULL) ? 0x0301 : iSeries_Node_read_config_dword(Node, Offset, ReadValue);
+}
+/**********************************************************************************/
+/* */
+/* Write PCI Config Space */
+/* */
+/** BYTE *************************************************************************/
+/* Write one byte to DevNode's config space at Offset; returns the HV
+ * return code.  NOTE(review): unlike the read path, DevNode is not
+ * NULL-checked here — callers reach this via get_Device_Node. */
+int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8 WriteData)
+{
+ ++Pci_Cfg_Write_Count;
+ DevNode->ReturnCode = HvCallPci_configStore8(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
+ Offset,WriteData);
+ if(Pci_Trace_Flag == 1) {
+ PCIFR("WCB: 0x%04X.%02X 0x%04X = 0x%02X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
+ }
+ if(DevNode->ReturnCode != 0 ) {
+ printk("PCI: WCB: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ PCIFR( "WCB: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ }
+ return DevNode->ReturnCode;
+}
+/** WORD *************************************************************************/
+/* Write a 16-bit value to DevNode's config space at Offset; returns the
+ * HV return code.  NOTE(review): DevNode is not NULL-checked here. */
+int iSeries_Node_write_config_word(struct iSeries_Device_Node* DevNode, int Offset, u16 WriteData)
+{
+ ++Pci_Cfg_Write_Count;
+ DevNode->ReturnCode = HvCallPci_configStore16(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
+ Offset,WriteData);
+ if(Pci_Trace_Flag == 1) {
+ PCIFR("WCW: 0x%04X.%02X 0x%04X = 0x%04X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
+ }
+ if(DevNode->ReturnCode != 0 ) {
+ printk("PCI: WCW: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ PCIFR( "WCW: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ }
+ return DevNode->ReturnCode;
+}
+/** DWORD *************************************************************************/
+/* Write a 32-bit value to DevNode's config space at Offset; returns the
+ * HV return code.  NOTE(review): DevNode is not NULL-checked here. */
+int iSeries_Node_write_config_dword(struct iSeries_Device_Node* DevNode, int Offset, u32 WriteData)
+{
+ ++Pci_Cfg_Write_Count;
+ DevNode->ReturnCode = HvCallPci_configStore32(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
+ Offset,WriteData);
+ if(Pci_Trace_Flag == 1) {
+ PCIFR("WCL: 0x%04X.%02X 0x%04X = 0x%08X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
+ }
+ if(DevNode->ReturnCode != 0 ) {
+ printk("PCI: WCL: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ PCIFR( "WCL: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
+ }
+ return DevNode->ReturnCode;
+}
+/* pci_ops shim: byte config write routed through the device node for PciDev. */
+int iSeries_pci_write_config_byte( struct pci_dev* PciDev,int Offset, u8 WriteValue)
+{
+ struct iSeries_Device_Node* Node = get_Device_Node(PciDev);
+ return (Node == NULL) ? 0x0301 : iSeries_Node_write_config_byte(Node, Offset, WriteValue);
+}
+/* pci_ops shim: word config write routed through the device node for PciDev. */
+int iSeries_pci_write_config_word( struct pci_dev* PciDev,int Offset,u16 WriteValue)
+{
+ struct iSeries_Device_Node* Node = get_Device_Node(PciDev);
+ return (Node == NULL) ? 0x0301 : iSeries_Node_write_config_word(Node, Offset, WriteValue);
+}
+/* pci_ops shim: dword config write routed through the device node for PciDev. */
+int iSeries_pci_write_config_dword(struct pci_dev* PciDev,int Offset,u32 WriteValue)
+{
+ struct iSeries_Device_Node* Node = get_Device_Node(PciDev);
+ return (Node == NULL) ? 0x0301 : iSeries_Node_write_config_dword(Node, Offset, WriteValue);
+}
+
+/************************************************************************/
+/* Branch Table */
+/* pci_ops dispatch table installed on each PHB (see find_and_init_phbs)*/
+/************************************************************************/
+struct pci_ops iSeries_pci_ops = {
+ iSeries_pci_read_config_byte, /* byte read */
+ iSeries_pci_read_config_word, /* word read */
+ iSeries_pci_read_config_dword, /* dword read */
+ iSeries_pci_write_config_byte, /* byte write */
+ iSeries_pci_write_config_word, /* word write */
+ iSeries_pci_write_config_dword /* dword write */
+};
+
+/************************************************************************
+ * Check Return Code
+ * -> On Failure, print and log information.
+ *    Increment Retry Count, if exceeds max, panic partition.
+ * -> If in retry, print and log success
+ *
+ * Returns -1 when the caller should retry the I/O, 0 on success.
+ ************************************************************************
+ * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
+ * PCI: Device 23.90 ReadL Retry( 1)
+ * PCI: Device 23.90 ReadL Retry Successful(1)
+ ************************************************************************/
+int CheckReturnCode(char* TextHdr, struct iSeries_Device_Node* DevNode, u64 RtnCode)
+{
+ if(RtnCode != 0) {
+ ++Pci_Error_Count;
+ ++DevNode->IoRetry;
+ PCIFR( "%s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X",
+ TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry,(int)RtnCode);
+ printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
+ TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry,(int)RtnCode);
+ /*******************************************************/
+ /* Bump the retry and check for retry count exceeded. */
+ /* If, Exceeded, panic the system (with automatic */
+ /* reboot disabled via panic_timeout = 0). */
+ /*******************************************************/
+ if(DevNode->IoRetry > Pci_Retry_Max && Pci_Error_Flag > 0 ) {
+ mf_displaySrc(0xB6000103);
+ panic_timeout = 0;
+ panic("PCI: Hardware I/O Error, SRC B6000103, Automatic Reboot Disabled.\n");
+ }
+ return -1; /* non-zero: caller retries the operation */
+ }
+ /********************************************************************
+ * If retry was in progress, log success and reset retry count *
+ *********************************************************************/
+ else if(DevNode->IoRetry > 0) {
+ PCIFR("%s: Device 0x%04X:%02X Retry Successful(%2d).",
+ TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry);
+ DevNode->IoRetry = 0;
+ return 0;
+ }
+ return 0;
+}
+/************************************************************************/
+/* Translate the I/O Address into a device node, bar, and bar offset. */
+/* Note: Make sure the passed variables end up on the stack to avoid */
+/* the exposure of being device global. */
+/* Panics if the address does not map to a known device. */
+/************************************************************************/
+static inline struct iSeries_Device_Node* xlateIoMmAddress(void* IoAddress,
+ union HvDsaMap* DsaPtr,
+ u64* BarOffsetPtr) {
+
+ /* Index into the IoMm tables by fixed-size window within the iSeries
+ * I/O memory region. */
+ unsigned long BaseIoAddr = (unsigned long)IoAddress-iSeries_Base_Io_Memory;
+ long TableIndex = BaseIoAddr/iSeries_IoMmTable_Entry_Size;
+ struct iSeries_Device_Node* DevNode = *(iSeries_IoMmTable +TableIndex);
+ if(DevNode != NULL) {
+ DsaPtr->DsaAddr = ISERIES_DSA(DevNode);
+ DsaPtr->Dsa.barNumber = *(iSeries_IoBarTable+TableIndex);
+ *BarOffsetPtr = BaseIoAddr % iSeries_IoMmTable_Entry_Size;
+ }
+ else {
+ panic("PCI: Invalid PCI IoAddress detected!\n");
+ }
+ return DevNode;
+}
+
+/************************************************************************/
+/* Read MM I/O Instructions for the iSeries */
+/* On MM I/O error the access is retried until CheckReturnCode */
+/* reports success (it panics the partition past the retry limit). */
+/* 16/32-bit results are byte-swapped (swab16/swab32) on return. */
+/************************************************************************/
+/* iSeries_Read_Byte = Read Byte ( 8 bit) */
+/* iSeries_Read_Word = Read Word (16 bit) */
+/* iSeries_Read_Long = Read Long (32 bit) */
+/************************************************************************/
+u8 iSeries_Read_Byte(void* IoAddress)
+{
+ u64 BarOffset;
+ union HvDsaMap DsaData;
+ struct HvCallPci_LoadReturn Return;
+ struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
+
+ /* Retry the HV load until CheckReturnCode reports success. */
+ do {
+ ++Pci_Io_Read_Count;
+ HvCall3Ret16(HvCallPciBarLoad8, &Return, DsaData.DsaAddr,BarOffset, 0);
+ } while (CheckReturnCode("RDB",DevNode, Return.rc) != 0);
+
+ if(Pci_Trace_Flag == 1) PCIFR("RDB: IoAddress 0x%p = 0x%02X",IoAddress, (u8)Return.value);
+ return (u8)Return.value;
+}
+/* 16-bit MMIO read; retries on HV error and byte-swaps the result. */
+u16 iSeries_Read_Word(void* IoAddress)
+{
+ u64 BarOffset;
+ union HvDsaMap DsaData;
+ struct HvCallPci_LoadReturn Return;
+ struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
+
+ do {
+ ++Pci_Io_Read_Count;
+ HvCall3Ret16(HvCallPciBarLoad16,&Return, DsaData.DsaAddr,BarOffset, 0);
+ } while (CheckReturnCode("RDW",DevNode, Return.rc) != 0);
+
+ if(Pci_Trace_Flag == 1) PCIFR("RDW: IoAddress 0x%p = 0x%04X",IoAddress, swab16((u16)Return.value));
+ return swab16((u16)Return.value);
+}
+u32 iSeries_Read_Long(void* IoAddress)
+{
+ u64 BarOffset;
+ union HvDsaMap DsaData;
+ struct HvCallPci_LoadReturn Return;
+ struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
+
+ do {
+ ++Pci_Io_Read_Count;
+ HvCall3Ret16(HvCallPciBarLoad32,&Return, DsaData.DsaAddr,BarOffset, 0);
+ } while (CheckReturnCode("RDL",DevNode, Return.rc) != 0);
+
+ if(Pci_Trace_Flag == 1) PCIFR("RDL: IoAddress 0x%p = 0x%04X",IoAddress, swab32((u32)Return.value));
+ return swab32((u32)Return.value);
+}
+/************************************************************************/
+/* Write MM I/O Instructions for the iSeries */
+/* Each write is retried until CheckReturnCode reports success. */
+/************************************************************************/
+/* iSeries_Write_Byte = Write Byte (8 bit) */
+/* iSeries_Write_Word = Write Word(16 bit) */
+/* iSeries_Write_Long = Write Long(32 bit) */
+/************************************************************************/
+void iSeries_Write_Byte(u8 Data, void* IoAddress)
+{
+ u64 BarOffset;
+ union HvDsaMap DsaData;
+ struct HvCallPci_LoadReturn Return;
+ struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
+
+ do {
+ ++Pci_Io_Write_Count;
+ Return.rc = HvCall4(HvCallPciBarStore8, DsaData.DsaAddr,BarOffset, Data, 0);
+ } while (CheckReturnCode("WWB",DevNode, Return.rc) != 0);
+ if(Pci_Trace_Flag == 1) PCIFR("WWB: IoAddress 0x%p = 0x%02X",IoAddress,Data);
+}
+/* 16-bit MMIO write; byte-swaps Data for the HV call and retries on error. */
+void iSeries_Write_Word(u16 Data, void* IoAddress)
+{
+ u64 BarOffset;
+ union HvDsaMap DsaData;
+ struct HvCallPci_LoadReturn Return;
+ struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
+
+ do {
+ ++Pci_Io_Write_Count;
+ Return.rc = HvCall4(HvCallPciBarStore16,DsaData.DsaAddr,BarOffset, swab16(Data), 0);
+ } while (CheckReturnCode("WWW",DevNode, Return.rc) != 0);
+ if(Pci_Trace_Flag == 1) PCIFR("WWW: IoAddress 0x%p = 0x%04X",IoAddress,Data);
+}
+/* 32-bit MMIO write; byte-swaps Data for the HV call and retries on error. */
+void iSeries_Write_Long(u32 Data, void* IoAddress)
+{
+ u64 BarOffset;
+ union HvDsaMap DsaData;
+ struct HvCallPci_LoadReturn Return;
+ struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
+
+ do {
+ ++Pci_Io_Write_Count;
+ Return.rc = HvCall4(HvCallPciBarStore32,DsaData.DsaAddr,BarOffset, swab32(Data), 0);
+ } while (CheckReturnCode("WWL",DevNode, Return.rc) != 0);
+ if(Pci_Trace_Flag == 1) PCIFR("WWL: IoAddress 0x%p = 0x%08X",IoAddress, Data);
+}
+/*
+ * This is called very early before the page table is setup.
+ * There are warnings here because of type mismatches.. Okay for now. AHT
+ *
+ * All of the early config-access hooks below are commented out, so this
+ * function is currently a no-op placeholder.
+ */
+void
+iSeries_pcibios_init_early(void)
+{
+ //ppc_md.pcibios_read_config_byte = iSeries_Node_read_config_byte;
+ //ppc_md.pcibios_read_config_word = iSeries_Node_read_config_word;
+ //ppc_md.pcibios_read_config_dword = iSeries_Node_read_config_dword;
+ //ppc_md.pcibios_write_config_byte = iSeries_Node_write_config_byte;
+ //ppc_md.pcibios_write_config_word = iSeries_Node_write_config_word;
+ //ppc_md.pcibios_write_config_dword = iSeries_Node_write_config_dword;
+}
+
+/************************************************************************/
+/* Set the slot reset line to the state passed in.                      */
+/* Platform-specific backend for the pci_reset_device function.         */
+/* Returns the HV return code, or -1 when no device node is attached.   */
+/************************************************************************/
+int pci_set_reset(struct pci_dev* PciDev, int State)
+{
+ struct iSeries_Device_Node* Node = (struct iSeries_Device_Node*)PciDev->sysdata;
+
+ if (Node == NULL) {
+ printk("PCI: Pci Reset Failed, Device Node not found for pci_dev %p\n",PciDev);
+ return -1;
+ }
+ Node->ReturnCode = HvCallPci_setSlotReset(ISERIES_BUS(Node),0x00,Node->AgentId,State);
+ return Node->ReturnCode;
+}
--- /dev/null
+/************************************************************************/
+/* File iSeries_pci_reset.c created by Allan Trautman on Mar 21 2001. */
+/************************************************************************/
+/* This code supports the pci interface on the IBM iSeries systems. */
+/* Copyright (C) 2001 <Allan H Trautman> <IBM Corp>                     */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created, March 20, 2001 */
+/* April 30, 2001, Added return codes on functions. */
+/* September 10, 2001, Ported to ppc64. */
+/* End Change Activity */
+/************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+
+#include <asm/io.h>
+#include <asm/init.h>
+#include <asm/iSeries/HvCallPci.h>
+#include <asm/iSeries/HvTypes.h>
+#include <asm/iSeries/mf.h>
+#include <asm/flight_recorder.h>
+#include <asm/pci.h>
+
+#include <asm/iSeries/iSeries_pci.h>
+#include "pci.h"
+
+/************************************************************************/
+/* Interface to toggle the reset line */
+/* AssertTime and DelayTime are given in tenths of a second.            */
+/************************************************************************/
+/* Pulse the slot reset line for PciDev: assert reset, hold it for    */
+/* AssertTime tenths of a second (default .5s when 0), deassert, then */
+/* sleep DelayTime tenths of a second (default 3s when 0) for the     */
+/* device to come back. Returns -1 if the device node is missing,     */
+/* otherwise the last HvCallPci_setSlotReset return code (0 = ok).    */
+int iSeries_Device_ToggleReset(struct pci_dev* PciDev, int AssertTime, int DelayTime)
+{
+ unsigned long AssertDelay, WaitDelay;
+ struct iSeries_Device_Node* DeviceNode = (struct iSeries_Device_Node*)PciDev->sysdata;
+ if (DeviceNode == NULL) {
+ printk("PCI: Pci Reset Failed, Device Node not found for pci_dev %p\n",PciDev);
+ return -1;
+ }
+ /********************************************************************
+ * Set defaults, Assert is .5 second, Wait is 3 seconds.
+ ********************************************************************/
+ if (AssertTime == 0) AssertDelay = ( 5 * HZ)/10;
+ else AssertDelay = (AssertTime*HZ)/10;
+ /* Bug fix: test the DelayTime parameter, not the WaitDelay local,  */
+ /* which was still uninitialized at this point.                     */
+ if (DelayTime == 0) WaitDelay = (30 * HZ)/10;
+ else WaitDelay = (DelayTime* HZ)/10;
+
+ /********************************************************************
+ * Assert reset
+ ********************************************************************/
+ DeviceNode->ReturnCode = HvCallPci_setSlotReset(ISERIES_BUS(DeviceNode),0x00,DeviceNode->AgentId,1);
+ if (DeviceNode->ReturnCode == 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(AssertDelay); /* Sleep for the time */
+ DeviceNode->ReturnCode = HvCallPci_setSlotReset(ISERIES_BUS(DeviceNode),0x00,DeviceNode->AgentId, 0);
+
+ /***************************************************************
+ * Wait for device to reset
+ ***************************************************************/
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(WaitDelay);
+ }
+ if (DeviceNode->ReturnCode == 0) {
+ /* Bug fix: "%02" was a malformed conversion; complete it as %02X. */
+ PCIFR("Slot 0x%04X.%02X Reset\n",ISERIES_BUS(DeviceNode),DeviceNode->AgentId );
+ }
+ else {
+ printk("PCI: Slot 0x%04X.%02X Reset Failed, RCode: %04X\n",ISERIES_BUS(DeviceNode),DeviceNode->AgentId,DeviceNode->ReturnCode);
+ PCIFR( "Slot 0x%04X.%02X Reset Failed, RCode: %04X\n",ISERIES_BUS(DeviceNode),DeviceNode->AgentId,DeviceNode->ReturnCode);
+ }
+ return DeviceNode->ReturnCode;
+}
--- /dev/null
+/*
+ * iSeries_proc.c
+ * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity: */
+/* End Change Activity */
+
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#ifndef _ISERIES_PROC_H
+#include <asm/iSeries/iSeries_proc.h>
+#endif
+
+
+/* Root of the /proc/iSeries tree; NULL until iSeries_proc_create(). */
+static struct proc_dir_entry * iSeries_proc_root = NULL;
+/* Nonzero once /proc/iSeries exists and queued callbacks have run.  */
+static int iSeries_proc_initializationDone = 0;
+/* Protects the registration queues and the state above.             */
+static spinlock_t iSeries_proc_lock;
+
+/* One pending callback registration, queued until /proc is created. */
+struct iSeries_proc_registration
+{
+ struct iSeries_proc_registration *next;
+ iSeriesProcFunction functionMember;
+};
+
+
+/* Static pool of registration nodes; no dynamic allocation is       */
+/* available this early in boot.                                     */
+struct iSeries_proc_registration preallocated[16];
+/* Minimal singly-linked FIFO: entries are enqueued at "head" and    */
+/* dequeued from "tail", with links running tail -> head via next.   */
+#define MYQUEUETYPE(T) struct MYQueue##T
+#define MYQUEUE(T) \
+MYQUEUETYPE(T) \
+{ \
+ struct T *head; \
+ struct T *tail; \
+}
+#define MYQUEUECTOR(q) do { (q)->head = NULL; (q)->tail = NULL; } while(0)
+#define MYQUEUEENQ(q, p) \
+do { \
+ (p)->next = NULL; \
+ if ((q)->head != NULL) { \
+ (q)->head->next = (p); \
+ (q)->head = (p); \
+ } else { \
+ (q)->tail = (q)->head = (p); \
+ } \
+} while(0)
+
+#define MYQUEUEDEQ(q,p) \
+do { \
+ (p) = (q)->tail; \
+ if ((p) != NULL) { \
+ (q)->tail = (p)->next; \
+ (p)->next = NULL; \
+ } \
+ if ((q)->tail == NULL) \
+ (q)->head = NULL; \
+} while(0)
+MYQUEUE(iSeries_proc_registration);
+typedef MYQUEUETYPE(iSeries_proc_registration) aQueue;
+
+
+/* iSeries_free holds unused pool nodes; iSeries_queued holds        */
+/* callbacks registered before /proc/iSeries existed.                */
+aQueue iSeries_free;
+aQueue iSeries_queued;
+
+/* Early-boot setup: initialize the lock and both queues, and place  */
+/* all 16 preallocated registration nodes on the free queue.         */
+void iSeries_proc_early_init(void)
+{
+ int i = 0;
+ unsigned long flags;
+ iSeries_proc_initializationDone = 0;
+ spin_lock_init(&iSeries_proc_lock);
+ MYQUEUECTOR(&iSeries_free);
+ MYQUEUECTOR(&iSeries_queued);
+
+ spin_lock_irqsave(&iSeries_proc_lock, flags);
+ for (i = 0; i < 16; ++i) {
+ MYQUEUEENQ(&iSeries_free, preallocated+i);
+ }
+ spin_unlock_irqrestore(&iSeries_proc_lock, flags);
+}
+
+/* Create /proc/iSeries and invoke every callback that was queued    */
+/* before the proc filesystem existed. Serialized against            */
+/* iSeries_proc_callback() by iSeries_proc_lock.                     */
+void iSeries_proc_create(void)
+{
+ unsigned long flags;
+ struct iSeries_proc_registration *reg = NULL;
+ spin_lock_irqsave(&iSeries_proc_lock, flags);
+ printk("iSeries_proc: Creating /proc/iSeries\n");
+
+ iSeries_proc_root = proc_mkdir("iSeries", 0);
+ if (!iSeries_proc_root) {
+ /* Bug fix: the original returned here while still holding the  */
+ /* spinlock with interrupts disabled, deadlocking any later     */
+ /* caller of iSeries_proc_callback().                           */
+ spin_unlock_irqrestore(&iSeries_proc_lock, flags);
+ return;
+ }
+
+ /* Drain the queue of callbacks registered before /proc existed. */
+ MYQUEUEDEQ(&iSeries_queued, reg);
+
+ while (reg != NULL) {
+ (*(reg->functionMember))(iSeries_proc_root);
+
+ MYQUEUEDEQ(&iSeries_queued, reg);
+ }
+
+ iSeries_proc_initializationDone = 1;
+ spin_unlock_irqrestore(&iSeries_proc_lock, flags);
+}
+
+/* Register initFunction to populate /proc/iSeries. If the tree      */
+/* already exists the function runs immediately; otherwise it is     */
+/* queued (on a node from the fixed 16-entry pool) and run later by  */
+/* iSeries_proc_create(). Registrations beyond the pool size are     */
+/* dropped with a console message.                                   */
+void iSeries_proc_callback(iSeriesProcFunction initFunction)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&iSeries_proc_lock, flags);
+
+ if (iSeries_proc_initializationDone) {
+ (*initFunction)(iSeries_proc_root);
+ } else {
+ struct iSeries_proc_registration *reg = NULL;
+
+ MYQUEUEDEQ(&iSeries_free, reg);
+
+ if (reg != NULL) {
+ /* printk("Registering %p in reg %p\n", initFunction, reg); */
+ reg->functionMember = initFunction;
+
+ MYQUEUEENQ(&iSeries_queued, reg);
+ } else {
+ printk("Couldn't get a queue entry\n");
+ }
+ }
+
+ spin_unlock_irqrestore(&iSeries_proc_lock, flags);
+}
+
+
--- /dev/null
+/*
+ *
+ *
+ * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
+ * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
+ *
+ * Module name: iSeries_setup.c
+ *
+ * Description:
+ * Architecture- / platform-specific boot-time initialization code for
+ * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
+ * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
+ * <dan@net4x.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/blk.h>
+
+#include <asm/processor.h>
+#include <asm/machdep.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+
+#include <asm/time.h>
+#include "iSeries_setup.h"
+#include <asm/Naca.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/iSeries/HvCallHpt.h>
+#include <asm/iSeries/HvLpConfig.h>
+#include <asm/iSeries/HvCallEvent.h>
+#include <asm/iSeries/HvCallSm.h>
+#include <asm/iSeries/HvCallXm.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/IoHriMainStore.h>
+#include <asm/iSeries/iSeries_proc.h>
+#include <asm/proc_pmc.h>
+#include <asm/iSeries/mf.h>
+
+/* Function Prototypes */
+
+extern void abort(void);
+#ifdef CONFIG_PPC_ISERIES
+static void build_iSeries_Memory_Map( void );
+static void setup_iSeries_cache_sizes( void );
+static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
+#endif
+void build_valid_hpte( unsigned long vsid, unsigned long ea, unsigned long pa,
+ pte_t * ptep, unsigned hpteflags, unsigned bolted );
+extern void ppcdbg_initialize(void);
+extern void iSeries_pcibios_init(void);
+extern void iSeries_pcibios_fixup(void);
+extern void iSeries_pcibios_fixup_bus(int);
+static void iSeries_setup_dprofile(void);
+
+/* Global Variables */
+
+static unsigned long procFreqHz = 0;
+static unsigned long procFreqMhz = 0;
+static unsigned long procFreqMhzHundreths = 0;
+
+static unsigned long tbFreqHz = 0;
+static unsigned long tbFreqMhz = 0;
+static unsigned long tbFreqMhzHundreths = 0;
+
+unsigned long dprof_shift = 0;
+unsigned long dprof_len = 0;
+unsigned int * dprof_buffer = NULL;
+
+int piranha_simulator = 0;
+
+extern char _end[];
+
+extern struct Naca *naca;
+extern int rd_size; /* Defined in drivers/block/rd.c */
+extern unsigned long klimit;
+extern unsigned long embedded_sysmap_start;
+extern unsigned long embedded_sysmap_end;
+
+extern unsigned long iSeries_recal_tb;
+extern unsigned long iSeries_recal_titan;
+
+extern char _stext;
+extern char _etext;
+
+static int mf_initialized = 0;
+
+struct MemoryBlock {
+ unsigned long absStart;
+ unsigned long absEnd;
+ unsigned long logicalStart;
+ unsigned long logicalEnd;
+};
+
+/*
+ * Process the main store vpd to determine where the holes in memory are
+ * and return the number of physical blocks and fill in the array of
+ * block data.
+ */
+
+/* Build the memory-block table for Condor hardware. Condor has at   */
+/* most one non-interleaved hole, so the result is either one block  */
+/* covering everything or two blocks around the hole.                */
+/* NOTE(review): max_entries is unused here — callers must pass >= 2.*/
+unsigned long iSeries_process_Condor_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
+{
+ /* Determine if absolute memory has any
+ * holes so that we can interpret the
+ * access map we get back from the hypervisor
+ * correctly.
+ */
+
+ unsigned long holeFirstChunk, holeSizeChunks;
+ unsigned long numMemoryBlocks = 1;
+ struct IoHriMainStoreSegment4 * msVpd = (struct IoHriMainStoreSegment4 *)xMsVpd;
+ unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
+ unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
+ unsigned long holeSize = holeEnd - holeStart;
+
+ printk("Mainstore_VPD: Condor\n");
+
+ /* Default: one block spanning the whole address range (in chunks). */
+ mb_array[0].logicalStart = 0;
+ mb_array[0].logicalEnd = 0x100000000;
+ mb_array[0].absStart = 0;
+ mb_array[0].absEnd = 0x100000000;
+
+ if ( holeSize ) {
+ /* Split into two blocks around the hole; addresses are masked    */
+ /* to 52 bits and converted to chunk units via addr_to_chunk().   */
+ numMemoryBlocks = 2;
+ holeStart = holeStart & 0x000fffffffffffff;
+ holeStart = addr_to_chunk(holeStart);
+ holeFirstChunk = holeStart;
+ holeSize = addr_to_chunk(holeSize);
+ holeSizeChunks = holeSize;
+ printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
+ holeFirstChunk, holeSizeChunks );
+ mb_array[0].logicalEnd = holeFirstChunk;
+ mb_array[0].absEnd = holeFirstChunk;
+ mb_array[1].logicalStart = holeFirstChunk;
+ mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
+ mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
+ mb_array[1].absEnd = 0x100000000;
+ }
+
+
+ return numMemoryBlocks;
+}
+
+#define MaxSegmentAreas 32
+#define MaxSegmentAdrRangeBlocks 128
+#define MaxAreaRangeBlocks 4
+/* Build the memory-block table for Regatta (POWER4) hardware by     */
+/* walking the main store VPD: collect unique address-range blocks   */
+/* from each existing area, sort them, then assign contiguous        */
+/* "logical" (bitmap) addresses and convert everything to 256K       */
+/* chunk units. Panics if more than max_entries blocks are found.    */
+unsigned long iSeries_process_Regatta_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
+{
+ struct IoHriMainStoreSegment5 * msVpdP = (struct IoHriMainStoreSegment5 *)xMsVpd;
+ unsigned long numSegmentBlocks = 0;
+ u32 existsBits = msVpdP->msAreaExists;
+ unsigned long area_num;
+
+ printk("Mainstore_VPD: Regatta\n");
+
+ /* existsBits is scanned MSB-first; bit set => area_num exists. */
+ for ( area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
+ unsigned long numAreaBlocks;
+ struct IoHriMainStoreArea4 * currentArea;
+
+ if ( existsBits & 0x80000000 ) {
+ unsigned long block_num;
+
+ currentArea = &msVpdP->msAreaArray[area_num];
+ numAreaBlocks = currentArea->numAdrRangeBlocks;
+
+ printk("ms_vpd: processing area %2ld blocks=%ld", area_num, numAreaBlocks);
+
+ for ( block_num = 0; block_num < numAreaBlocks; ++block_num ) {
+ /* Process an address range block */
+ struct MemoryBlock tempBlock;
+ unsigned long i;
+
+ tempBlock.absStart = (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
+ tempBlock.absEnd = (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
+ tempBlock.logicalStart = 0;
+ tempBlock.logicalEnd = 0;
+
+ printk("\n block %ld absStart=%016lx absEnd=%016lx", block_num,
+ tempBlock.absStart, tempBlock.absEnd);
+
+ /* Deduplicate by absStart; i lands on the match, or on        */
+ /* numSegmentBlocks when the block is new (i reused below).    */
+ for ( i=0; i<numSegmentBlocks; ++i ) {
+ if ( mb_array[i].absStart == tempBlock.absStart )
+ break;
+ }
+ if ( i == numSegmentBlocks ) {
+ if ( numSegmentBlocks == max_entries ) {
+ panic("iSeries_process_mainstore_vpd: too many memory blocks");
+ }
+ mb_array[numSegmentBlocks] = tempBlock;
+ ++numSegmentBlocks;
+ }
+ else {
+ printk(" (duplicate)");
+ }
+ }
+ printk("\n");
+ }
+ existsBits <<= 1;
+ }
+ /* Now sort the blocks found into ascending sequence */
+ /* (simple bubble sort keyed on absStart). */
+ if ( numSegmentBlocks > 1 ) {
+ unsigned long m, n;
+ for ( m=0; m<numSegmentBlocks-1; ++m ) {
+ for ( n=numSegmentBlocks-1; m<n; --n ) {
+ if ( mb_array[n].absStart < mb_array[n-1].absStart ) {
+ struct MemoryBlock tempBlock;
+ tempBlock = mb_array[n];
+ mb_array[n] = mb_array[n-1];
+ mb_array[n-1] = tempBlock;
+ }
+
+ }
+ }
+ }
+ /* Assign "logical" addresses to each block. These
+ * addresses correspond to the hypervisor "bitmap" space.
+ * Convert all addresses into units of 256K chunks.
+ */
+ {
+ unsigned long i, nextBitmapAddress;
+ printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
+ nextBitmapAddress = 0;
+ for ( i=0; i<numSegmentBlocks; ++i ) {
+ unsigned long length = mb_array[i].absEnd - mb_array[i].absStart;
+ mb_array[i].logicalStart = nextBitmapAddress;
+ mb_array[i].logicalEnd = nextBitmapAddress + length;
+ nextBitmapAddress += length;
+ printk(" Bitmap range: %016lx - %016lx\n"
+ " Absolute range: %016lx - %016lx\n",
+ mb_array[i].logicalStart, mb_array[i].logicalEnd,
+ mb_array[i].absStart, mb_array[i].absEnd);
+ /* Mask to 52-bit physical addresses, then switch to chunks. */
+ mb_array[i].absStart = addr_to_chunk( mb_array[i].absStart & 0x000fffffffffffff );
+ mb_array[i].absEnd = addr_to_chunk( mb_array[i].absEnd & 0x000fffffffffffff );
+ mb_array[i].logicalStart = addr_to_chunk( mb_array[i].logicalStart );
+ mb_array[i].logicalEnd = addr_to_chunk( mb_array[i].logicalEnd );
+ }
+ }
+
+ return numSegmentBlocks;
+
+}
+
+/* Fill mb_array (capacity max_entries) from the main store VPD and  */
+/* return the number of memory blocks found. Regatta (POWER4)        */
+/* partitions use a different VPD layout than Condor, so dispatch to */
+/* the matching parser, then log the resulting table.                */
+unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
+{
+ unsigned long blk;
+ unsigned long block_count;
+
+ block_count = __is_processor( PV_POWER4 )
+ ? iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries )
+ : iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
+
+ printk("Mainstore_VPD: numMemoryBlocks = %ld \n", block_count);
+ for ( blk = 0; blk < block_count; ++blk ) {
+ printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
+ " abs chunks %016lx - %016lx\n",
+ blk, mb_array[blk].logicalStart, mb_array[blk].logicalEnd,
+ mb_array[blk].absStart, mb_array[blk].absEnd);
+ }
+
+ return block_count;
+}
+
+/*
+ * void __init iSeries_init_early()
+ */
+
+
+
+/* Very early iSeries boot setup: optional initrd wiring, timebase/  */
+/* TOD snapshot, installation of all ppc_md machine-dependent        */
+/* vectors, HPT/TCE init, and construction of the physical->absolute */
+/* memory map. Entire body is compiled out without CONFIG_PPC_ISERIES.*/
+void __init
+iSeries_init_early(void)
+{
+#ifdef CONFIG_PPC_ISERIES
+ ppcdbg_initialize();
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+ /*
+ * If the init RAM disk has been configured and there is
+ * a non-zero starting address for it, set it up
+ */
+
+ if ( naca->xRamDisk ) {
+ initrd_start = (unsigned long)__va(naca->xRamDisk);
+ initrd_end = initrd_start + naca->xRamDiskSize * PAGE_SIZE;
+ initrd_below_start_ok = 1; // ramdisk in kernel space
+ ROOT_DEV = MKDEV( RAMDISK_MAJOR, 0 );
+
+ /* Grow rd_size (in K) if the provided ramdisk is larger. */
+ if ( ((rd_size*1024)/PAGE_SIZE) < naca->xRamDiskSize )
+ rd_size = (naca->xRamDiskSize*PAGE_SIZE)/1024;
+ } else
+
+#endif /* CONFIG_BLK_DEV_INITRD */
+ {
+
+ /* ROOT_DEV = MKDEV( VIODASD_MAJOR, 1 ); */
+ }
+
+ /* Paired timebase/TOD snapshot used later for clock recalibration. */
+ iSeries_recal_tb = get_tb();
+ iSeries_recal_titan = HvCallXm_loadTod();
+
+ /* Install the iSeries machine-dependent vectors. */
+ ppc_md.setup_arch = iSeries_setup_arch;
+ ppc_md.setup_residual = iSeries_setup_residual;
+ ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
+ ppc_md.irq_cannonicalize = NULL;
+ ppc_md.init_IRQ = iSeries_init_IRQ;
+ ppc_md.init_ras_IRQ = NULL;
+ ppc_md.get_irq = iSeries_get_irq;
+ ppc_md.init = NULL;
+
+ ppc_md.pcibios_fixup = iSeries_pcibios_fixup;
+ ppc_md.pcibios_fixup_bus = iSeries_pcibios_fixup_bus;
+
+ ppc_md.restart = iSeries_restart;
+ ppc_md.power_off = iSeries_power_off;
+ ppc_md.halt = iSeries_halt;
+
+ ppc_md.time_init = NULL;
+ ppc_md.get_boot_time = iSeries_get_boot_time;
+ ppc_md.set_rtc_time = iSeries_set_rtc_time;
+ ppc_md.get_rtc_time = iSeries_get_rtc_time;
+ ppc_md.calibrate_decr = iSeries_calibrate_decr;
+ ppc_md.progress = iSeries_progress;
+
+ /* No native keyboard hardware on an iSeries partition. */
+ ppc_md.kbd_setkeycode = NULL;
+ ppc_md.kbd_getkeycode = NULL;
+ ppc_md.kbd_translate = NULL;
+ ppc_md.kbd_unexpected_up = NULL;
+ ppc_md.kbd_leds = NULL;
+ ppc_md.kbd_init_hw = NULL;
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+ ppc_md.ppc_kbd_sysrq_xlate = NULL;
+#endif
+
+ hpte_init_iSeries();
+ tce_init_iSeries();
+
+ /* Initialize the table which translate Linux physical addresses to
+ * AS/400 absolute addresses
+ */
+
+ build_iSeries_Memory_Map();
+
+ setup_iSeries_cache_sizes();
+
+ /* Initialize machine-dependency vectors */
+
+
+#ifdef CONFIG_SMP
+ smp_init_iSeries();
+#endif
+
+ /* xPirEnvironMode == 0 indicates the Piranha simulator. */
+ if ( itLpNaca.xPirEnvironMode == 0 )
+ piranha_simulator = 1;
+#endif
+}
+
+/*
+ * void __init iSeries_init()
+ */
+
+/* Later boot setup: bind LP event queue 0 to processor 0, fetch and */
+/* trim the kernel command line from the primary VSP, reserve and    */
+/* zero the optional "dprofile=" buffer, and bring up the iSeries    */
+/* /proc machinery and management facility (mf).                     */
+void __init
+iSeries_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ /* Associate Lp Event Queue 0 with processor 0 */
+ HvCallEvent_setLpEventQueueInterruptProc( 0, 0 );
+
+ {
+ /* copy the command line parameter from the primary VSP */
+ char *p, *q;
+ HvCallEvent_dmaToSp( cmd_line,
+ 2*64*1024,
+ 256,
+ HvLpDma_Direction_RemoteToLocal );
+
+ /* Strip trailing NULs, blanks and newlines in place. */
+ p = q = cmd_line + 255;
+ while( p > cmd_line ) {
+ if ((*p == 0) || (*p == ' ') || (*p == '\n'))
+ --p;
+ else
+ break;
+ }
+ if ( p < q )
+ *(p+1) = 0;
+ }
+
+ if (strstr(cmd_line, "dprofile=")) {
+ char *p, *q;
+
+ /* Parse each "dprofile=<shift>" token; only accept it when it   */
+ /* starts a word (preceded by a space or at start of line).      */
+ for (q = cmd_line; (p = strstr(q, "dprofile=")) != 0; ) {
+ unsigned long size, new_klimit;
+ q = p + 9;
+ if (p > cmd_line && p[-1] != ' ')
+ continue;
+ dprof_shift = simple_strtoul(q, &q, 0);
+ dprof_len = (unsigned long)&_etext - (unsigned long)&_stext;
+ dprof_len >>= dprof_shift;
+ /* Page-align the buffer after klimit and claim it from lmb. */
+ size = ((dprof_len * sizeof(unsigned int)) + (PAGE_SIZE-1)) & PAGE_MASK;
+ dprof_buffer = (unsigned int *)((klimit + (PAGE_SIZE-1)) & PAGE_MASK);
+ new_klimit = ((unsigned long)dprof_buffer) + size;
+ lmb_reserve( __pa(klimit), (new_klimit-klimit));
+ klimit = new_klimit;
+ memset( dprof_buffer, 0, size );
+ }
+ }
+
+ iSeries_setup_dprofile();
+
+ iSeries_proc_early_init();
+ mf_init();
+ mf_initialized = 1;
+ mb();
+
+ iSeries_proc_callback( &pmc_proc_init );
+}
+
+#ifdef CONFIG_PPC_ISERIES
+/*
+ * The iSeries may have very large memories ( > 128 GB ) and a partition
+ * may get memory in "chunks" that may be anywhere in the 2**52 real
+ * address space. The chunks are 256K in size. To map this to the
+ * memory model Linux expects, the AS/400 specific code builds a
+ * translation table to translate what Linux thinks are "physical"
+ * addresses to the actual real addresses. This allows us to make
+ * it appear to Linux that we have contiguous memory starting at
+ * physical address zero while in fact this could be far from the truth.
+ * To avoid confusion, I'll let the words physical and/or real address
+ * apply to the Linux addresses while I'll use "absolute address" to
+ * refer to the actual hardware real address.
+ *
+ * build_iSeries_Memory_Map gets information from the Hypervisor and
+ * looks at the Main Store VPD to determine the absolute addresses
+ * of the memory that has been assigned to our partition and builds
+ * a table used to translate Linux's physical addresses to these
+ * absolute addresses. Absolute addresses are needed when
+ * communicating with the hypervisor (e.g. to build HPT entries)
+ */
+
+/* Build the physical->absolute chunk translation table (msChunks):  */
+/* pin the load area at physical 0, record the HPT's location so it  */
+/* is never mapped, then walk the hypervisor access map assigning    */
+/* each owned chunk (outside HPT and load area) the next physical    */
+/* chunk. Finally bolt the kernel and initialize lmb.                */
+static void __init build_iSeries_Memory_Map(void)
+{
+ u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
+ u32 nextPhysChunk;
+ u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
+ u32 num_ptegs;
+ u32 totalChunks,moreChunks;
+ u32 currChunk, thisChunk, absChunk;
+ u32 currDword;
+ u32 chunkBit;
+ u64 map;
+ struct MemoryBlock mb[32];
+ unsigned long numMemoryBlocks, curBlock;
+
+ /* Chunk size on iSeries is 256K bytes */
+ totalChunks = (u32)HvLpConfig_getMsChunks();
+ klimit = msChunks_alloc(klimit, totalChunks, 1UL<<18);
+
+ /* Get absolute address of our load area
+ * and map it to physical address 0
+ * This guarantees that the loadarea ends up at physical 0
+ * otherwise, it might not be returned by PLIC as the first
+ * chunks
+ */
+
+ loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
+ loadAreaSize = itLpNaca.xLoadAreaChunks;
+
+ /* Only add the pages already mapped here.
+ * Otherwise we might add the hpt pages
+ * The rest of the pages of the load area
+ * aren't in the HPT yet and can still
+ * be assigned an arbitrary physical address
+ */
+ if ( (loadAreaSize * 64) > HvPagesToMap )
+ loadAreaSize = HvPagesToMap / 64;
+
+ loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
+
+ /* TODO Do we need to do something if the HPT is in the 64MB load area?
+ * This would be required if the itLpNaca.xLoadAreaChunks includes
+ * the HPT size
+ */
+
+ printk( "Mapping load area - physical addr = 0000000000000000\n"
+ " absolute addr = %016lx\n",
+ chunk_to_addr(loadAreaFirstChunk) );
+ printk( "Load area size %dK\n", loadAreaSize*256 );
+
+ /* Identity-style mapping: physical chunk n -> load area chunk n. */
+ for ( nextPhysChunk = 0;
+ nextPhysChunk < loadAreaSize;
+ ++nextPhysChunk ) {
+ msChunks.abs[nextPhysChunk] = loadAreaFirstChunk+nextPhysChunk;
+ }
+
+ /* Get absolute address of our HPT and remember it so
+ * we won't map it to any physical address
+ */
+
+ hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
+ hptSizePages = (u32)(HvCallHpt_getHptPages());
+ hptSizeChunks = hptSizePages >> (msChunks.chunk_shift-PAGE_SHIFT);
+ hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
+
+ printk( "HPT absolute addr = %016lx, size = %dK\n",
+ chunk_to_addr(hptFirstChunk), hptSizeChunks*256 );
+
+ /* Fill in the htab_data structure */
+
+ /* Fill in size of hashed page table */
+ num_ptegs = hptSizePages * (PAGE_SIZE/(sizeof(HPTE)*HPTES_PER_GROUP));
+ htab_data.htab_num_ptegs = num_ptegs;
+ htab_data.htab_hash_mask = num_ptegs - 1;
+
+ /* The actual hashed page table is in the hypervisor, we have no direct access */
+ htab_data.htab = NULL;
+
+ /* Determine if absolute memory has any
+ * holes so that we can interpret the
+ * access map we get back from the hypervisor
+ * correctly.
+ */
+ numMemoryBlocks = iSeries_process_mainstore_vpd( mb, 32 );
+
+ /* Process the main store access map from the hypervisor
+ * to build up our physical -> absolute translation table
+ */
+ curBlock = 0;
+ currChunk = 0;
+ currDword = 0;
+ moreChunks = totalChunks;
+
+ /* The access map is a bitmap, 64 chunks per dword, MSB first; a   */
+ /* set bit means this partition owns that (logical) chunk.         */
+ while ( moreChunks ) {
+ map = HvCallSm_get64BitsOfAccessMap( itLpNaca.xLpIndex,
+ currDword );
+ thisChunk = currChunk;
+ while ( map ) {
+ chunkBit = map >> 63;
+ map <<= 1;
+ if ( chunkBit ) {
+ --moreChunks;
+
+ /* Advance to the memory block containing thisChunk. */
+ while ( thisChunk >= mb[curBlock].logicalEnd ) {
+ ++curBlock;
+ if ( curBlock >= numMemoryBlocks )
+ panic("out of memory blocks");
+ }
+ if ( thisChunk < mb[curBlock].logicalStart )
+ panic("memory block error");
+
+ absChunk = mb[curBlock].absStart + ( thisChunk - mb[curBlock].logicalStart );
+
+ /* Skip chunks belonging to the HPT or the already-     */
+ /* mapped load area.                                    */
+ if ( ( ( absChunk < hptFirstChunk ) ||
+ ( absChunk > hptLastChunk ) ) &&
+ ( ( absChunk < loadAreaFirstChunk ) ||
+ ( absChunk > loadAreaLastChunk ) ) ) {
+ msChunks.abs[nextPhysChunk] = absChunk;
+ ++nextPhysChunk;
+ }
+ }
+ ++thisChunk;
+ }
+ ++currDword;
+ currChunk += 64;
+ }
+
+ /* main store size (in chunks) is
+ * totalChunks - hptSizeChunks
+ * which should be equal to
+ * nextPhysChunk
+ */
+ naca->physicalMemorySize = chunk_to_addr(nextPhysChunk);
+
+ /* Bolt kernel mappings for all of memory */
+ iSeries_bolt_kernel( 0, naca->physicalMemorySize );
+
+ lmb_init();
+ lmb_add( 0, naca->physicalMemorySize );
+ lmb_reserve( 0, __pa(klimit));
+
+ /*
+ * Hardcode to GP size. I am not sure where to get this info. DRENG
+ */
+ naca->slb_size = 64;
+}
+
+/*
+ * Set up the variables that describe the cache line sizes
+ * for this machine.
+ */
+
+/* Read the L1 I/D cache line sizes from processor VPD, derive the   */
+/* lines-per-page counts and log2 of each line size, and record them */
+/* in the naca.                                                      */
+static void __init setup_iSeries_cache_sizes(void)
+{
+ unsigned i,n;
+ naca->iCacheL1LineSize = xIoHriProcessorVpd[0].xInstCacheOperandSize;
+ naca->dCacheL1LineSize = xIoHriProcessorVpd[0].xDataCacheOperandSize;
+ naca->iCacheL1LinesPerPage = PAGE_SIZE / naca->iCacheL1LineSize;
+ naca->dCacheL1LinesPerPage = PAGE_SIZE / naca->dCacheL1LineSize;
+ /* Compute floor(log2(line size)) by counting halvings. */
+ i = naca->iCacheL1LineSize;
+ n = 0;
+ while ((i=(i/2))) ++n;
+ naca->iCacheL1LogLineSize = n;
+ i = naca->dCacheL1LineSize;
+ n = 0;
+ while ((i=(i/2))) ++n;
+ naca->dCacheL1LogLineSize = n;
+
+ printk( "D-cache line size = %d (log = %d)\n",
+ (unsigned)naca->dCacheL1LineSize,
+ (unsigned)naca->dCacheL1LogLineSize );
+ printk( "I-cache line size = %d (log = %d)\n",
+ (unsigned)naca->iCacheL1LineSize,
+ (unsigned)naca->iCacheL1LogLineSize );
+
+}
+
+/*
+ * Bolt the kernel addr space into the HPT
+ */
+
+/* Bolt [saddr, eaddr) of the kernel linear mapping into the HPT:    */
+/* for each page, mark an existing hypervisor HPTE as bolted, or     */
+/* create a new bolted read-write entry.                             */
+static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
+{
+ unsigned long pa;
+ unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
+ HPTE hpte;
+
+ for (pa=saddr; pa < eaddr ;pa+=PAGE_SIZE) {
+ unsigned long ea = (unsigned long)__va(pa);
+ unsigned long vsid = get_kernel_vsid( ea );
+ unsigned long va = ( vsid << 28 ) | ( pa & 0xfffffff );
+ unsigned long vpn = va >> PAGE_SHIFT;
+ unsigned long slot = HvCallHpt_findValid( &hpte, vpn );
+ if ( hpte.dw0.dw0.v ) {
+ /* HPTE exists, so just bolt it */
+ HvCallHpt_setSwBits( slot, 0x10, 0 );
+ } else {
+ /* No HPTE exists, so create a new bolted one */
+ build_valid_hpte(vsid, ea, pa, NULL, mode_rw, 1);
+ }
+ }
+}
+#endif /* CONFIG_PPC_ISERIES */
+
+/*
+ * Document me.
+ */
+/* Arch setup: allocate and register the LP event stack with the     */
+/* hypervisor, initialize the LP event queue pointers, and compute   */
+/* and log processor/timebase frequencies from the VPD.              */
+void __init
+iSeries_setup_arch(void)
+{
+ void * eventStack;
+
+ /* Setup the Lp Event Queue */
+
+ /* Allocate a page for the Event Stack
+ * The hypervisor wants the absolute real address, so
+ * we subtract out the KERNELBASE and add in the
+ * absolute real address of the kernel load area
+ */
+
+ eventStack = alloc_bootmem_pages( LpEventStackSize );
+
+ memset( eventStack, 0, LpEventStackSize );
+
+ /* Invoke the hypervisor to initialize the event stack */
+
+ HvCallEvent_setLpEventStack( 0, eventStack, LpEventStackSize );
+
+ /* Initialize fields in our Lp Event Queue */
+
+ xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
+ xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
+ xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
+ (LpEventStackSize - LpEventMaxSize);
+ xItLpQueue.xIndex = 0;
+
+ /* Compute processor frequency */
+ /* (VPD value appears to be a scaled reciprocal — the 2^34 factor  */
+ /* is assumed from this formula; TODO confirm against VPD spec.)   */
+ procFreqHz = (((1UL<<34) * 1000000) / xIoHriProcessorVpd[0].xProcFreq );
+ procFreqMhz = procFreqHz / 1000000;
+ procFreqMhzHundreths = (procFreqHz/10000) - (procFreqMhz*100);
+
+ /* Compute time base frequency */
+ tbFreqHz = (((1UL<<32) * 1000000) / xIoHriProcessorVpd[0].xTimeBaseFreq );
+ tbFreqMhz = tbFreqHz / 1000000;
+ tbFreqMhzHundreths = (tbFreqHz/10000) - (tbFreqMhz*100);
+
+ printk("Max logical processors = %d\n",
+ itVpdAreas.xSlicMaxLogicalProcs );
+ printk("Max physical processors = %d\n",
+ itVpdAreas.xSlicMaxPhysicalProcs );
+ printk("Processor frequency = %lu.%02lu\n",
+ procFreqMhz,
+ procFreqMhzHundreths );
+ printk("Time base frequency = %lu.%02lu\n",
+ tbFreqMhz,
+ tbFreqMhzHundreths );
+ printk("Processor version = %x\n",
+ xIoHriProcessorVpd[0].xPVR );
+
+}
+
+/*
+ * void iSeries_setup_residual()
+ *
+ * Description:
+ * This routine pretty-prints CPU information gathered from the VPD
+ * for use in /proc/cpuinfo
+ *
+ * Input(s):
+ * *m - seq_file to which the CPU data is written.
+ * cpu_id - logical id of the CPU being described.
+ *
+ * Output(s):
+ * Clock, time base and L1 cache line sizes are appended to the
+ * seq_file; nothing is returned.
+ */
+void
+iSeries_setup_residual(struct seq_file *m, unsigned long cpu_id)
+{
+ /* Frequencies were computed once in iSeries_setup_arch(); cache   */
+ /* line sizes come from setup_iSeries_cache_sizes().               */
+ seq_printf(m, "clock\t\t: %lu.%02luMhz\n", procFreqMhz,
+ procFreqMhzHundreths);
+ seq_printf(m, "time base\t: %lu.%02luMHz\n", tbFreqMhz,
+ tbFreqMhzHundreths);
+ seq_printf(m, "i-cache\t\t: %d\n", naca->iCacheL1LineSize);
+ seq_printf(m, "d-cache\t\t: %d\n", naca->dCacheL1LineSize);
+}
+
+/* Emit the machine line for /proc/cpuinfo. */
+void iSeries_get_cpuinfo(struct seq_file *m)
+{
+ seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
+}
+
+/*
+ * ppc_md.get_irq hook. No interrupt controller is queried here;
+ * this always returns -2 so the caller treats the interrupt as
+ * handled/ignored (iSeries event delivery happens elsewhere).
+ */
+int
+iSeries_get_irq(struct pt_regs *regs)
+{
+ /* -2 means ignore this interrupt */
+ return -2;
+}
+
+/*
+ * ppc_md.restart hook: reboot the partition through the
+ * management facility. The cmd argument is ignored.
+ */
+void
+iSeries_restart(char *cmd)
+{
+ mf_reboot();
+}
+
+/*
+ * ppc_md.power_off hook: power the partition off through the
+ * management facility.
+ */
+void
+iSeries_power_off(void)
+{
+ mf_powerOff();
+}
+
+/*
+ * ppc_md.halt hook. There is no distinct halt state for a
+ * partition, so this powers off, same as iSeries_power_off().
+ */
+void
+iSeries_halt(void)
+{
+ mf_powerOff();
+}
+
+/*
+ * Empty time-init hook; all clock setup happens in
+ * iSeries_calibrate_decr() and iSeries_setup_arch().
+ */
+void __init
+iSeries_time_init(void)
+{
+ /* Nothing to do */
+}
+
+/* JDH Hack */
+unsigned long jdh_time = 0;
+
+extern void setup_default_decr(void);
+
+/*
+ * void __init iSeries_calibrate_decr()
+ *
+ * Description:
+ * This routine retrieves the internal processor frequency from the VPD,
+ * and sets up the kernel timer decrementer based on that value.
+ *
+ */
+/* Derive the timebase/decrementer frequency from the VPD and fill   */
+/* in the kernel tick-conversion globals (tb_ticks_per_jiffy et al), */
+/* then program the default decrementer value.                       */
+void __init
+iSeries_calibrate_decr(void)
+{
+ unsigned long freq;
+ unsigned long cyclesPerUsec;
+ unsigned long tbf;
+
+ struct div_result divres;
+
+ /* Compute decrementer (and TB) frequency
+ * in cycles/sec
+ */
+
+ tbf = xIoHriProcessorVpd[0].xTimeBaseFreq;
+
+ freq = 0x0100000000;
+ freq *= 1000000; /* 2^32 * 10^6 */
+ freq = freq / tbf; /* cycles / sec */
+ cyclesPerUsec = freq / 1000000; /* cycles / usec */
+
+ /* Set the amount to refresh the decrementer by. This
+ * is the number of decrementer ticks it takes for
+ * 1/HZ seconds.
+ */
+
+ tb_ticks_per_jiffy = freq / HZ;
+ /*
+ * tb_ticks_per_sec = freq; would give better accuracy
+ * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
+ * that jiffies (and xtime) will match the time returned
+ * by do_gettimeofday.
+ */
+ tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+ tb_ticks_per_usec = cyclesPerUsec;
+ tb_to_us = mulhwu_scale_factor(freq, 1000000);
+ /* tb_to_xs: scale factor for 2^20 / ticks-per-sec via 128/32 div. */
+ div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
+ tb_to_xs = divres.result_low;
+ setup_default_decr();
+}
+
+/* ppc_md.progress hook: log the boot progress string, and — on real */
+/* hardware once mf is up — show the code on the operator panel      */
+/* (0xffff clears the panel instead).                                */
+void __init
+iSeries_progress( char * st, unsigned short code )
+{
+ printk( "Progress: [%04x] - %s\n", (unsigned)code, st );
+ if ( !piranha_simulator && mf_initialized ) {
+ if (code != 0xffff)
+ mf_displayProgress( code );
+ else
+ mf_clearSrc();
+ }
+}
+
+
+/*
+ * iSeries_fixup_klimit - raise klimit past embedded boot data.
+ *
+ * If a ram disk was appended to the kernel image, move klimit beyond it
+ * (xRamDisk is an offset from KERNELBASE, xRamDiskSize is in pages).
+ * Otherwise, if a System.map was embedded, round its end up to the next
+ * 4K boundary and move klimit there.
+ */
+void iSeries_fixup_klimit(void)
+{
+ /* Change klimit to take into account any ram disk that may be included */
+ if (naca->xRamDisk)
+ klimit = KERNELBASE + (u64)naca->xRamDisk + (naca->xRamDiskSize * PAGE_SIZE);
+ else {
+ /* No ram disk was included - check and see if there was an embedded system map */
+ /* Change klimit to take into account any embedded system map */
+ if (embedded_sysmap_end)
+ klimit = KERNELBASE + ((embedded_sysmap_end+4095) & 0xfffffffffffff000);
+ }
+}
+
+/*
+ * iSeries_setup_dprofile - enable decrementer profiling on every paca.
+ *
+ * If a profiling buffer was allocated, point each processor's paca at
+ * the shared buffer and parameters. The mb() orders the parameter
+ * stores before the prof_enabled store, so a CPU never starts profiling
+ * with half-written state.
+ */
+static void iSeries_setup_dprofile(void)
+{
+ if ( dprof_buffer ) {
+ unsigned i;
+ for (i=0; i<maxPacas; ++i) {
+ xPaca[i].prof_shift = dprof_shift;
+ xPaca[i].prof_len = dprof_len-1;
+ xPaca[i].prof_buffer = dprof_buffer;
+ xPaca[i].prof_stext = (unsigned *)&_stext;
+ mb();
+ xPaca[i].prof_enabled = 1;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
+ * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
+ *
+ * Module name: iSeries_setup.h  (historically as400_setup.h)
+ *
+ * Description:
+ * Architecture- / platform-specific boot-time initialization code for
+ * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
+ * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
+ * <dan@netx4.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ISERIES_SETUP_H__
+#define __ISERIES_SETUP_H__
+
+#include <linux/seq_file.h>
+
+/* Boot-time and runtime platform entry points implemented in
+ * iSeries_setup.c; wired into the ppc64 machine-dependent hooks. */
+extern void iSeries_init_early(void);
+extern void iSeries_init(unsigned long r3,
+ unsigned long ird_start,
+ unsigned long ird_end,
+ unsigned long cline_start,
+ unsigned long cline_end);
+extern void iSeries_setup_arch(void);
+extern void iSeries_setup_residual(struct seq_file *m,
+ unsigned long cpu_id);
+extern void iSeries_get_cpuinfo(struct seq_file *m);
+extern void iSeries_init_IRQ(void);
+extern int iSeries_get_irq(struct pt_regs *regs);
+extern void iSeries_restart(char *cmd);
+extern void iSeries_power_off(void);
+extern void iSeries_halt(void);
+extern void iSeries_time_init(void);
+extern void iSeries_get_boot_time(struct rtc_time *tm);
+extern int iSeries_set_rtc_time(unsigned long now);
+extern unsigned long iSeries_get_rtc_time(void);
+extern void iSeries_calibrate_decr(void);
+extern void iSeries_progress( char *, unsigned short );
+
+#endif /* __ISERIES_SETUP_H__ */
--- /dev/null
+/*
+ * Idle daemon for PowerPC. Idle daemon will handle any action
+ * that needs to be taken when the system becomes idle.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+
+#include <asm/time.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/iSeries/HvCall.h>
+#include <asm/iSeries/ItLpQueue.h>
+
+/* Min/max observed yield durations, in timebase ticks (diagnostics). */
+unsigned long maxYieldTime = 0;
+unsigned long minYieldTime = 0xffffffffffffffffUL;
+
+#ifdef CONFIG_PPC_ISERIES
+/*
+ * yield_shared_processor - give the rest of this timeslice back to the
+ * hypervisor on a shared-processor partition.
+ *
+ * Masks wakeup sources, yields until roughly one jiffy from now, records
+ * how long the yield actually took, then fakes a decrementer interrupt
+ * (the decrementer does not run while yielded) and drains pending LP
+ * events.
+ */
+static void yield_shared_processor(void)
+{
+ struct Paca *paca;
+ unsigned long tb;
+ unsigned long yieldTime;
+
+ paca = (struct Paca *)mfspr(SPRG3);
+ HvCall_setEnabledInterrupts( HvCall_MaskIPI |
+ HvCall_MaskLpEvent |
+ HvCall_MaskLpProd |
+ HvCall_MaskTimeout );
+
+ tb = get_tb();
+ /* Compute future tb value when yield should expire */
+ HvCall_yieldProcessor( HvCall_YieldTimed, tb+tb_ticks_per_jiffy );
+
+ yieldTime = get_tb() - tb;
+ if ( yieldTime > maxYieldTime )
+ maxYieldTime = yieldTime;
+
+ if ( yieldTime < minYieldTime )
+ minYieldTime = yieldTime;
+
+ /* The decrementer stops during the yield. Force a fake decrementer
+ * here and let the timer_interrupt code sort out the actual time.
+ */
+ paca->xLpPaca.xIntDword.xFields.xDecrInt = 1;
+ process_iSeries_events();
+}
+#endif /* CONFIG_PPC_ISERIES */
+
+/*
+ * idled - per-CPU idle loop.
+ *
+ * On an iSeries shared processor the CPU yields its slice back to the
+ * hypervisor when idle; otherwise it spins with TIF_POLLING_NRFLAG set
+ * (so a remote wakeup does not need an IPI), draining pending iSeries
+ * LP events, until there is work to schedule.
+ *
+ * Fix(review): the original entered an unconditional while(1)
+ * need_resched/schedule loop first, which made the iSeries-aware
+ * for(;;) loop below unreachable dead code; that stray loop (and the
+ * unused local 'oldval') has been removed.
+ */
+int idled(void)
+{
+ struct Paca *paca;
+#ifdef CONFIG_PPC_ISERIES
+ unsigned long CTRL;
+#endif
+
+ /* endless loop with no priority at all */
+#ifdef CONFIG_PPC_ISERIES
+ /* ensure iSeries run light will be out when idle */
+ current->thread.flags &= ~PPC_FLAG_RUN_LIGHT;
+ CTRL = mfspr(CTRLF);
+ CTRL &= ~RUNLATCH;
+ mtspr(CTRLT, CTRL);
+#endif
+
+ printk("cpu %d hits idle loop\n", smp_processor_id());
+
+ paca = (struct Paca *)mfspr(SPRG3);
+
+ for (;;) {
+#ifdef CONFIG_PPC_ISERIES
+ if ( paca->xLpPaca.xSharedProc ) {
+ /* Shared processor: drain events, then yield to the
+ * hypervisor rather than spinning.
+ */
+ if ( ItLpQueue_isLpIntPending( paca->lpQueuePtr ) )
+ process_iSeries_events();
+ if (!need_resched())
+ yield_shared_processor();
+ }
+ else
+#endif
+ {
+ if (!need_resched()) {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ while (!test_thread_flag(TIF_NEED_RESCHED)) {
+#ifdef CONFIG_PPC_ISERIES
+ HMT_medium();
+ if (ItLpQueue_isLpIntPending(paca->lpQueuePtr))
+ process_iSeries_events();
+#endif
+ /* Lower thread priority while polling */
+ HMT_low();
+ }
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ }
+ }
+ HMT_medium();
+ if (need_resched()) {
+ schedule();
+ check_pgt_cache();
+ }
+ }
+ return 0;
+}
+
+/*
+ * SMP entry into the idle task - calls the same thing as the
+ * non-smp versions. -- Cort
+ *
+ * idled() loops forever, so the return here is never reached.
+ */
+int cpu_idle(void)
+{
+ idled();
+ return 0;
+}
--- /dev/null
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+
+/* Statically-initialized state for the very first task (the idle/init
+ * task), which cannot be allocated from slabs like later tasks. */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
--- /dev/null
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ * Based on sparc64 ioctl32.c by:
+ *
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ *
+ * ppc64 changes:
+ *
+ * Copyright (C) 2000 Ken Aaker (kdaaker@rchland.vnet.ibm.com)
+ * Copyright (C) 2001 Anton Blanchard (antonb@au.ibm.com)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl.h>
+#include <linux/if.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/raid/md.h>
+#include <linux/kd.h>
+#include <linux/route.h>
+#include <linux/in6.h>
+#include <linux/ipv6_route.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/vt.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fd.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/mtio.h>
+#include <linux/cdrom.h>
+#include <linux/loop.h>
+#include <linux/auto_fs.h>
+#include <linux/devfs_fs.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/fb.h>
+#include <linux/ext2_fs.h>
+#include <linux/videodev.h>
+#include <linux/netdevice.h>
+#include <linux/raw.h>
+#include <linux/smb_fs.h>
+#include <linux/blkpg.h>
+#include <linux/blk.h>
+#include <linux/elevator.h>
+#include <linux/rtc.h>
+#include <linux/pci.h>
+#if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+#include <linux/lvm.h>
+#endif /* LVM */
+
+#include <scsi/scsi.h>
+/* Ugly hack. */
+#undef __KERNEL__
+#include <scsi/scsi_ioctl.h>
+#define __KERNEL__
+#include <scsi/sg.h>
+
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_bonding.h>
+#include <asm/module.h>
+#include <linux/soundcard.h>
+#include <linux/watchdog.h>
+#include <linux/lp.h>
+
+#include <linux/atm.h>
+#include <linux/atmarp.h>
+#include <linux/atmclip.h>
+#include <linux/atmdev.h>
+#include <linux/atmioc.h>
+#include <linux/atmlec.h>
+#include <linux/atmmpc.h>
+#include <linux/atmsvc.h>
+#include <linux/atm_tcp.h>
+#include <linux/sonet.h>
+#include <linux/atm_suni.h>
+#include <linux/mtd/mtd.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+#include <linux/usb.h>
+#include <linux/usbdevice_fs.h>
+#include <linux/nbd.h>
+#include <asm/ppc32.h>
+#include <asm/ppcdebug.h>
+
+/* Use this to get at 32-bit user passed pointers.
+   See sys_sparc32.c for description about these. */
+#define A(__x) ((unsigned long)(__x))
+/* AA(): zero-extend the low 32 bits (clrldi clears the upper 32),
+ * yielding a clean 64-bit user pointer from a 32-bit value. */
+#define AA(__x) \
+({ unsigned long __ret; \
+ __asm__ ("clrldi %0, %0, 32" \
+ : "=r" (__ret) \
+ : "0" (__x)); \
+ __ret; \
+})
+
+/* Aiee. Someone does not find a difference between int and long */
+#define EXT2_IOC32_GETFLAGS _IOR('f', 1, int)
+#define EXT2_IOC32_SETFLAGS _IOW('f', 2, int)
+#define EXT2_IOC32_GETVERSION _IOR('v', 1, int)
+#define EXT2_IOC32_SETVERSION _IOW('v', 2, int)
+
+extern asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+
+/*
+ * w_long - run an ioctl that writes back a native long and hand the
+ * result to a 32-bit caller. The ioctl runs under KERNEL_DS against a
+ * kernel-stack long; only the low 32 bits are stored to user space.
+ */
+static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ int err;
+ unsigned long val;
+
+ set_fs (KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&val);
+ set_fs (old_fs);
+ if (!err && put_user(val, (u32 *)arg))
+ return -EFAULT;
+ return err;
+}
+
+/*
+ * rw_long - like w_long(), but the ioctl both reads and writes the
+ * value: a u32 is fetched from user space, widened to a kernel long for
+ * the ioctl, and the low 32 bits of the result are stored back.
+ */
+static int rw_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ int err;
+ unsigned long val;
+
+ if (get_user(val, (u32 *)arg))
+ return -EFAULT;
+ set_fs (KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&val);
+ set_fs (old_fs);
+ if (!err && put_user(val, (u32 *)arg))
+ return -EFAULT;
+ return err;
+}
+
+/*
+ * do_ext2_ioctl - remap the 32-bit ext2 flag/version ioctl numbers to
+ * their native equivalents (they differ only in the size encoded in the
+ * command) and pass the call through unchanged.
+ */
+static int do_ext2_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ /* These are just misnamed, they actually get/put from/to user an int */
+ switch (cmd) {
+ case EXT2_IOC32_GETFLAGS: cmd = EXT2_IOC_GETFLAGS; break;
+ case EXT2_IOC32_SETFLAGS: cmd = EXT2_IOC_SETFLAGS; break;
+ case EXT2_IOC32_GETVERSION: cmd = EXT2_IOC_GETVERSION; break;
+ case EXT2_IOC32_SETVERSION: cmd = EXT2_IOC_SETVERSION; break;
+ }
+ return sys_ioctl(fd, cmd, arg);
+}
+
+/* 32-bit layout of struct video_tuner as seen by 32-bit userspace. */
+struct video_tuner32 {
+ s32 tuner;
+ u8 name[32];
+ u32 rangelow, rangehigh;
+ u32 flags;
+ u16 mode, signal;
+};
+
+/* Copy a user video_tuner32 into a kernel video_tuner. Only the first
+ * access is fault-checked; the remaining __get_user results are
+ * deliberately ignored (same VMA already validated). */
+static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 *up)
+{
+ int i;
+
+ if (get_user(kp->tuner, &up->tuner))
+ return -EFAULT;
+ for(i = 0; i < 32; i++)
+ __get_user(kp->name[i], &up->name[i]);
+ __get_user(kp->rangelow, &up->rangelow);
+ __get_user(kp->rangehigh, &up->rangehigh);
+ __get_user(kp->flags, &up->flags);
+ __get_user(kp->mode, &up->mode);
+ __get_user(kp->signal, &up->signal);
+ return 0;
+}
+
+/* Copy a kernel video_tuner back out to a user video_tuner32. */
+static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 *up)
+{
+ int i;
+
+ if (put_user(kp->tuner, &up->tuner))
+ return -EFAULT;
+ for(i = 0; i < 32; i++)
+ __put_user(kp->name[i], &up->name[i]);
+ __put_user(kp->rangelow, &up->rangelow);
+ __put_user(kp->rangehigh, &up->rangehigh);
+ __put_user(kp->flags, &up->flags);
+ __put_user(kp->mode, &up->mode);
+ __put_user(kp->signal, &up->signal);
+ return 0;
+}
+
+/* 32-bit layout of struct video_buffer; base is a 32-bit user pointer. */
+struct video_buffer32 {
+ /* void * */ u32 base;
+ s32 height, width, depth, bytesperline;
+};
+
+/* Copy in a user video_buffer32, widening base to a kernel pointer. */
+static int get_video_buffer32(struct video_buffer *kp, struct video_buffer32 *up)
+{
+ u32 tmp;
+
+ if (get_user(tmp, &up->base))
+ return -EFAULT;
+ kp->base = (void *) ((unsigned long)tmp);
+ __get_user(kp->height, &up->height);
+ __get_user(kp->width, &up->width);
+ __get_user(kp->depth, &up->depth);
+ __get_user(kp->bytesperline, &up->bytesperline);
+ return 0;
+}
+
+/* Copy a kernel video_buffer out, truncating base to 32 bits. */
+static int put_video_buffer32(struct video_buffer *kp, struct video_buffer32 *up)
+{
+ u32 tmp = (u32)((unsigned long)kp->base);
+
+ if (put_user(tmp, &up->base))
+ return -EFAULT;
+ __put_user(kp->height, &up->height);
+ __put_user(kp->width, &up->width);
+ __put_user(kp->depth, &up->depth);
+ __put_user(kp->bytesperline, &up->bytesperline);
+ return 0;
+}
+
+/* 32-bit layouts of struct video_clip / struct video_window; embedded
+ * pointers are carried as u32 user addresses. */
+struct video_clip32 {
+ s32 x, y, width, height;
+ /* struct video_clip32 * */ u32 next;
+};
+
+struct video_window32 {
+ u32 x, y, width, height, chromakey, flags;
+ /* struct video_clip32 * */ u32 clips;
+ s32 clipcount;
+};
+
+/* Release the kmalloc'd clip array attached to a kernel video_window
+ * by get_video_window32(); safe to call when no clips were copied. */
+static void free_kvideo_clips(struct video_window *kp)
+{
+ struct video_clip *cp;
+
+ cp = kp->clips;
+ if (cp != NULL)
+ kfree(cp);
+}
+
+/*
+ * get_video_window32 - convert a user video_window32 into the kernel's
+ * native struct video_window, copying in the clip list as well.
+ *
+ * A negative clipcount means a VIDEO_CLIPMAP_SIZE bitmap follows rather
+ * than a clip array (the "peculiar interface" below). On success the
+ * kmalloc'd clip array is owned by the caller and must be released with
+ * free_kvideo_clips().
+ *
+ * Fix(review): the original wrote kcp[nclips].next inside the copy
+ * loop -- one element past the end of the allocation (kernel heap
+ * overflow). Each element's own next pointer (kcp[i].next) is cleared
+ * instead; the kernel-side list is traversed by clipcount, not next.
+ */
+static int get_video_window32(struct video_window *kp, struct video_window32 *up)
+{
+ struct video_clip32 *ucp;
+ struct video_clip *kcp;
+ int nclips, err, i;
+ u32 tmp;
+
+ if (get_user(kp->x, &up->x))
+ return -EFAULT;
+ __get_user(kp->y, &up->y);
+ __get_user(kp->width, &up->width);
+ __get_user(kp->height, &up->height);
+ __get_user(kp->chromakey, &up->chromakey);
+ __get_user(kp->flags, &up->flags);
+ __get_user(kp->clipcount, &up->clipcount);
+ __get_user(tmp, &up->clips);
+ ucp = (struct video_clip32 *)A(tmp);
+ kp->clips = NULL;
+
+ nclips = kp->clipcount;
+ if (nclips == 0)
+ return 0;
+
+ if (ucp == 0)
+ return -EINVAL;
+
+ /* Peculiar interface... */
+ if (nclips < 0)
+ nclips = VIDEO_CLIPMAP_SIZE;
+
+ kcp = kmalloc(nclips * sizeof(struct video_clip), GFP_KERNEL);
+ err = -ENOMEM;
+ if (kcp == NULL)
+ goto cleanup_and_err;
+
+ kp->clips = kcp;
+ for(i = 0; i < nclips; i++) {
+ __get_user(kcp[i].x, &ucp[i].x);
+ __get_user(kcp[i].y, &ucp[i].y);
+ __get_user(kcp[i].width, &ucp[i].width);
+ __get_user(kcp[i].height, &ucp[i].height);
+ kcp[i].next = NULL;
+ }
+
+ return 0;
+
+cleanup_and_err:
+ free_kvideo_clips(kp);
+ return err;
+}
+
+/* You get back everything except the clips... */
+static int put_video_window32(struct video_window *kp, struct video_window32 *up)
+{
+ if (put_user(kp->x, &up->x))
+ return -EFAULT;
+ __put_user(kp->y, &up->y);
+ __put_user(kp->width, &up->width);
+ __put_user(kp->height, &up->height);
+ __put_user(kp->chromakey, &up->chromakey);
+ __put_user(kp->flags, &up->flags);
+ __put_user(kp->clipcount, &up->clipcount);
+ return 0;
+}
+
+/* 32-bit command numbers: same V4L ioctl codes, but sized by the
+ * 32-bit structures above, so the encoded length differs. */
+#define VIDIOCGTUNER32 _IOWR('v',4, struct video_tuner32)
+#define VIDIOCSTUNER32 _IOW('v',5, struct video_tuner32)
+#define VIDIOCGWIN32 _IOR('v',9, struct video_window32)
+#define VIDIOCSWIN32 _IOW('v',10, struct video_window32)
+#define VIDIOCGFBUF32 _IOR('v',11, struct video_buffer32)
+#define VIDIOCSFBUF32 _IOW('v',12, struct video_buffer32)
+#define VIDIOCGFREQ32 _IOR('v',14, u32)
+#define VIDIOCSFREQ32 _IOW('v',15, u32)
+
+/*
+ * do_video_ioctl - 32-bit entry for the V4L ioctls above.
+ *
+ * Remaps the 32-bit command to its native number, converts the argument
+ * into a kernel-stack union, performs the ioctl under KERNEL_DS, and
+ * converts the result back for GET commands. The clip array allocated
+ * by get_video_window32() for VIDIOCSWIN is freed after the call.
+ */
+static int do_video_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ union {
+ struct video_tuner vt;
+ struct video_buffer vb;
+ struct video_window vw;
+ unsigned long vx;
+ } karg;
+ mm_segment_t old_fs = get_fs();
+ void *up = (void *)arg;
+ int err = 0;
+
+ /* First, convert the command. */
+ switch(cmd) {
+ case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break;
+ case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break;
+ case VIDIOCGWIN32: cmd = VIDIOCGWIN; break;
+ case VIDIOCSWIN32: cmd = VIDIOCSWIN; break;
+ case VIDIOCGFBUF32: cmd = VIDIOCGFBUF; break;
+ case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break;
+ case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break;
+ case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break;
+ };
+
+ /* Copy in the 32-bit argument for SET commands (and GTUNER, which
+ * both reads and writes its argument). */
+ switch(cmd) {
+ case VIDIOCSTUNER:
+ case VIDIOCGTUNER:
+ err = get_video_tuner32(&karg.vt, up);
+ break;
+
+ case VIDIOCSWIN:
+ err = get_video_window32(&karg.vw, up);
+ break;
+
+ case VIDIOCSFBUF:
+ err = get_video_buffer32(&karg.vb, up);
+ break;
+
+ case VIDIOCSFREQ:
+ err = get_user(karg.vx, (u32 *)up);
+ break;
+ };
+ if (err)
+ goto out;
+
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&karg);
+ set_fs(old_fs);
+
+ if (cmd == VIDIOCSWIN)
+ free_kvideo_clips(&karg.vw);
+
+ if (err == 0) {
+ switch(cmd) {
+ case VIDIOCGTUNER:
+ err = put_video_tuner32(&karg.vt, up);
+ break;
+
+ case VIDIOCGWIN:
+ err = put_video_window32(&karg.vw, up);
+ break;
+
+ case VIDIOCGFBUF:
+ err = put_video_buffer32(&karg.vb, up);
+ break;
+
+ case VIDIOCGFREQ:
+ err = put_user(((u32)karg.vx), (u32 *)up);
+ break;
+ };
+ }
+out:
+ return err;
+}
+
+/* 32-bit layout of struct timeval (two ints instead of two longs). */
+struct timeval32 {
+ int tv_sec;
+ int tv_usec;
+};
+
+/*
+ * do_siocgstamp - fetch the last-packet timestamp via SIOCGSTAMP into a
+ * kernel timeval, then narrow both fields into the user's timeval32.
+ */
+static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct timeval32 *up = (struct timeval32 *)arg;
+ struct timeval ktv;
+ mm_segment_t old_fs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&ktv);
+ set_fs(old_fs);
+ if (!err) {
+ err = put_user(ktv.tv_sec, &up->tv_sec);
+ err |= __put_user(ktv.tv_usec, &up->tv_usec);
+ }
+ return err;
+}
+
+/* 32-bit mirrors of struct ifmap / struct ifreq / struct ifconf;
+ * pointer members are carried as 32-bit user addresses
+ * (__kernel_caddr_t32), everything else matches the native layout. */
+struct ifmap32 {
+ u32 mem_start;
+ u32 mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+};
+
+struct ifreq32 {
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_ivalue;
+ int ifru_mtu;
+ struct ifmap32 ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ char ifru_newname[IFNAMSIZ];
+ __kernel_caddr_t32 ifru_data;
+ } ifr_ifru;
+};
+
+struct ifconf32 {
+ int ifc_len; /* size of buffer */
+ __kernel_caddr_t32 ifcbuf;
+};
+
+#ifdef CONFIG_NET
+/*
+ * dev_ifname32 - SIOCGIFNAME: look up a device by index (ifr_ifindex is
+ * the ifru_ivalue union member via the <linux/if.h> accessor macros)
+ * and return its name in the same ifreq32.
+ *
+ * NOTE(review): dev_get_by_index() takes a device reference and no
+ * matching dev_put() appears here -- presumably a reference leak;
+ * confirm against the era's netdevice API.
+ */
+static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct net_device *dev;
+ struct ifreq32 ifr32;
+ int err;
+
+ if (copy_from_user(&ifr32, (struct ifreq32 *)arg, sizeof(struct ifreq32)))
+ return -EFAULT;
+
+ dev = dev_get_by_index(ifr32.ifr_ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ strcpy(ifr32.ifr_name, dev->name);
+
+ err = copy_to_user((struct ifreq32 *)arg, &ifr32, sizeof(struct ifreq32));
+ return (err ? -EFAULT : 0);
+}
+#endif
+
+/*
+ * dev_ifconf - SIOCGIFCONF: the user buffer holds ifreq32 entries, but
+ * the native ioctl works on ifreq entries. A kernel buffer sized for
+ * the equivalent number of native entries is filled from (and later
+ * copied back to) the 32-bit user array element by element, and the
+ * returned length is rescaled to ifreq32 units. A NULL ifcbuf is the
+ * standard "just tell me the needed size" probe.
+ *
+ * NOTE(review): i/j are unsigned while ifc_len is int -- a negative
+ * user-supplied length would make the copy-in loop run wild; presumably
+ * callers are trusted not to do that, but a cast/check would be safer.
+ */
+static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct ifconf32 ifc32;
+ struct ifconf ifc;
+ struct ifreq32 *ifr32;
+ struct ifreq *ifr;
+ mm_segment_t old_fs;
+ unsigned int i, j;
+ int err;
+
+ if (copy_from_user(&ifc32, (struct ifconf32 *)arg, sizeof(struct ifconf32)))
+ return -EFAULT;
+
+ if (ifc32.ifcbuf == 0) {
+ ifc32.ifc_len = 0;
+ ifc.ifc_len = 0;
+ ifc.ifc_buf = NULL;
+ } else {
+ ifc.ifc_len = ((ifc32.ifc_len / sizeof (struct ifreq32)) + 1) *
+ sizeof (struct ifreq);
+ ifc.ifc_buf = kmalloc (ifc.ifc_len, GFP_KERNEL);
+ if (!ifc.ifc_buf)
+ return -ENOMEM;
+ }
+ ifr = ifc.ifc_req;
+ ifr32 = (struct ifreq32 *)A(ifc32.ifcbuf);
+ for (i = 0; i < ifc32.ifc_len; i += sizeof (struct ifreq32)) {
+ if (copy_from_user(ifr++, ifr32++, sizeof (struct ifreq32))) {
+ kfree (ifc.ifc_buf);
+ return -EFAULT;
+ }
+ }
+ old_fs = get_fs(); set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, SIOCGIFCONF, (unsigned long)&ifc);
+ set_fs (old_fs);
+ if (!err) {
+ ifr = ifc.ifc_req;
+ ifr32 = (struct ifreq32 *)A(ifc32.ifcbuf);
+ for (i = 0, j = 0; i < ifc32.ifc_len && j < ifc.ifc_len;
+ i += sizeof (struct ifreq32), j += sizeof (struct ifreq)) {
+ if (copy_to_user(ifr32++, ifr++, sizeof (struct ifreq32))) {
+ err = -EFAULT;
+ break;
+ }
+ }
+ if (!err) {
+ if (ifc32.ifcbuf == 0) {
+ /* Translate from 64-bit structure multiple to
+ * a 32-bit one.
+ */
+ i = ifc.ifc_len;
+ i = ((i / sizeof(struct ifreq)) * sizeof(struct ifreq32));
+ ifc32.ifc_len = i;
+ } else {
+ if (i <= ifc32.ifc_len)
+ ifc32.ifc_len = i;
+ else
+ ifc32.ifc_len = i - sizeof (struct ifreq32);
+ }
+ if (copy_to_user((struct ifconf32 *)arg, &ifc32, sizeof(struct ifconf32)))
+ err = -EFAULT;
+ }
+ }
+ if (ifc.ifc_buf != NULL)
+ kfree (ifc.ifc_buf);
+ return err;
+}
+
+/*
+ * ethtool_ioctl - SIOCETHTOOL: the ifreq's ifr_data points at a
+ * variable-size ethtool request. The request is staged in a single
+ * kernel page, the ioctl runs under KERNEL_DS, and the (possibly
+ * updated) request is copied back to the same user buffer.
+ *
+ * Fix(review): for ETHTOOL_GREGS the length comes from userspace
+ * (regs->len) with no upper bound, but the staging buffer is exactly
+ * one page -- the unchecked copy_from_user could overflow into
+ * adjacent kernel memory. The length is now rejected when the total
+ * request exceeds PAGE_SIZE. Also repairs a mangled "&regaddr" token.
+ */
+static int ethtool_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ mm_segment_t old_fs;
+ int err, len;
+ u32 data, ethcmd;
+
+ if (copy_from_user(&ifr, (struct ifreq32 *)arg, sizeof(struct ifreq32)))
+ return -EFAULT;
+ ifr.ifr_data = (__kernel_caddr_t)get_free_page(GFP_KERNEL);
+ if (!ifr.ifr_data)
+ return -EAGAIN;
+
+ __get_user(data, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_data));
+
+ if (get_user(ethcmd, (u32 *)A(data))) {
+ err = -EFAULT;
+ goto out;
+ }
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: len = sizeof(struct ethtool_drvinfo); break;
+ case ETHTOOL_GMSGLVL:
+ case ETHTOOL_SMSGLVL:
+ case ETHTOOL_GLINK:
+ case ETHTOOL_NWAY_RST: len = sizeof(struct ethtool_value); break;
+ case ETHTOOL_GREGS: {
+ struct ethtool_regs *regaddr = (struct ethtool_regs *)A(data);
+ /* darned variable size arguments */
+ if (get_user(len, (u32 *)&regaddr->len)) {
+ err = -EFAULT;
+ goto out;
+ }
+ len += sizeof(struct ethtool_regs);
+ break;
+ }
+ case ETHTOOL_GSET:
+ case ETHTOOL_SSET: len = sizeof(struct ethtool_cmd); break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* The staging buffer is a single page; refuse anything larger
+ * (len is user-influenced for ETHTOOL_GREGS). */
+ if (len < 0 || len > PAGE_SIZE) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(ifr.ifr_data, (char *)A(data), len)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
+ set_fs (old_fs);
+ if (!err) {
+ u32 data;
+
+ __get_user(data, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_data));
+ len = copy_to_user((char *)A(data), ifr.ifr_data, len);
+ if (len)
+ err = -EFAULT;
+ }
+
+out:
+ free_page((unsigned long)ifr.ifr_data);
+ return err;
+}
+
+/*
+ * bond_ioctl - bonding SIOCBOND* ioctls: stage the fixed-size payload
+ * that ifr_data points at in a kernel page, run the ioctl under
+ * KERNEL_DS, and copy the payload back.
+ *
+ * NOTE(review): fd is unsigned long here but unsigned int in every
+ * sibling handler -- harmless, but inconsistent.
+ */
+static int bond_ioctl(unsigned long fd, unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ mm_segment_t old_fs;
+ int err, len;
+ u32 data;
+
+ if (copy_from_user(&ifr, (struct ifreq32 *)arg, sizeof(struct ifreq32)))
+ return -EFAULT;
+ ifr.ifr_data = (__kernel_caddr_t)get_free_page(GFP_KERNEL);
+ if (!ifr.ifr_data)
+ return -EAGAIN;
+
+ /* All supported commands carry a known, page-sized-or-smaller payload. */
+ switch (cmd) {
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ len = IFNAMSIZ * sizeof(char);
+ break;
+ case SIOCBONDSLAVEINFOQUERY:
+ len = sizeof(struct ifslave);
+ break;
+ case SIOCBONDINFOQUERY:
+ len = sizeof(struct ifbond);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ };
+
+ __get_user(data, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_data));
+ if (copy_from_user(ifr.ifr_data, (char *)A(data), len)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
+ set_fs (old_fs);
+ if (!err) {
+ len = copy_to_user((char *)A(data), ifr.ifr_data, len);
+ if (len)
+ err = -EFAULT;
+ }
+
+out:
+ free_page((unsigned long)ifr.ifr_data);
+ return err;
+}
+
+/*
+ * dev_ifsioc - generic SIOC[GS]IF* handler: convert the ifreq32 to a
+ * native ifreq (field by field for SIOCSIFMAP, whose ifmap layout
+ * differs; by straight copy otherwise, since the leading layout
+ * matches), run the ioctl under KERNEL_DS, and convert the result back
+ * for the GET commands.
+ */
+static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ mm_segment_t old_fs;
+ int err;
+
+ switch (cmd) {
+ case SIOCSIFMAP:
+ err = copy_from_user(&ifr, (struct ifreq32 *)arg, sizeof(ifr.ifr_name));
+ err |= __get_user(ifr.ifr_map.mem_start, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.mem_start));
+ err |= __get_user(ifr.ifr_map.mem_end, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.mem_end));
+ err |= __get_user(ifr.ifr_map.base_addr, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.base_addr));
+ err |= __get_user(ifr.ifr_map.irq, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.irq));
+ err |= __get_user(ifr.ifr_map.dma, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.dma));
+ err |= __get_user(ifr.ifr_map.port, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.port));
+ if (err)
+ return -EFAULT;
+ break;
+ default:
+ if (copy_from_user(&ifr, (struct ifreq32 *)arg, sizeof(struct ifreq32)))
+ return -EFAULT;
+ break;
+ }
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
+ set_fs (old_fs);
+ if (!err) {
+ switch (cmd) {
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFINDEX:
+ case SIOCGIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFTXQLEN:
+ if (copy_to_user((struct ifreq32 *)arg, &ifr, sizeof(struct ifreq32)))
+ return -EFAULT;
+ break;
+ case SIOCGIFMAP:
+ err = copy_to_user((struct ifreq32 *)arg, &ifr, sizeof(ifr.ifr_name));
+ err |= __put_user(ifr.ifr_map.mem_start, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.mem_start));
+ err |= __put_user(ifr.ifr_map.mem_end, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.mem_end));
+ err |= __put_user(ifr.ifr_map.base_addr, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.base_addr));
+ err |= __put_user(ifr.ifr_map.irq, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.irq));
+ err |= __put_user(ifr.ifr_map.dma, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.dma));
+ err |= __put_user(ifr.ifr_map.port, &(((struct ifreq32 *)arg)->ifr_ifru.ifru_map.port));
+ if (err)
+ err = -EFAULT;
+ break;
+ }
+ }
+ return err;
+}
+
+/* 32-bit mirrors of struct rtentry and struct in6_rtmsg; pointer and
+ * long fields shrink to u32, sockaddr fields keep the native layout. */
+struct rtentry32 {
+ u32 rt_pad1;
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ unsigned short rt_flags;
+ short rt_pad2;
+ u32 rt_pad3;
+ unsigned char rt_tos;
+ unsigned char rt_class;
+ short rt_pad4;
+ short rt_metric; /* +1 for binary compatibility! */
+ /* char * */ u32 rt_dev; /* forcing the device at add */
+ u32 rt_mtu; /* per route MTU/Window */
+ u32 rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+
+};
+
+struct in6_rtmsg32 {
+ struct in6_addr rtmsg_dst;
+ struct in6_addr rtmsg_src;
+ struct in6_addr rtmsg_gateway;
+ u32 rtmsg_type;
+ u16 rtmsg_dst_len;
+ u16 rtmsg_src_len;
+ u32 rtmsg_metric;
+ u32 rtmsg_info;
+ u32 rtmsg_flags;
+ s32 rtmsg_ifindex;
+};
+
+extern struct socket *sockfd_lookup(int fd, int *err);
+
+/*
+ * routing_ioctl - SIOCADDRT/SIOCDELRT: build a native in6_rtmsg (for an
+ * AF_INET6 socket) or rtentry (otherwise) from the 32-bit user layout,
+ * then issue the ioctl under KERNEL_DS. rt_dev is copied into a local
+ * 15-char buffer so the kernel side sees a valid pointer.
+ *
+ * Any failed copy (the |='d results mix copy_from_user byte counts and
+ * __get_user error codes) collapses to a single -EFAULT.
+ *
+ * NOTE(review): sockfd_lookup() takes a reference on the socket's file
+ * that is never released here -- presumably a leak; confirm against the
+ * era's sockfd_put()/fput() API.
+ */
+static int routing_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ void *r = NULL;
+ struct in6_rtmsg r6;
+ struct rtentry r4;
+ char devname[16];
+ u32 rtdev;
+ mm_segment_t old_fs = get_fs();
+
+ struct socket *mysock = sockfd_lookup(fd, &ret);
+
+ if (mysock && mysock->sk && mysock->sk->family == AF_INET6) { /* ipv6 */
+ ret = copy_from_user (&r6.rtmsg_dst, &(((struct in6_rtmsg32 *)arg)->rtmsg_dst),
+ 3 * sizeof(struct in6_addr));
+ ret |= __get_user (r6.rtmsg_type, &(((struct in6_rtmsg32 *)arg)->rtmsg_type));
+ ret |= __get_user (r6.rtmsg_dst_len, &(((struct in6_rtmsg32 *)arg)->rtmsg_dst_len));
+ ret |= __get_user (r6.rtmsg_src_len, &(((struct in6_rtmsg32 *)arg)->rtmsg_src_len));
+ ret |= __get_user (r6.rtmsg_metric, &(((struct in6_rtmsg32 *)arg)->rtmsg_metric));
+ ret |= __get_user (r6.rtmsg_info, &(((struct in6_rtmsg32 *)arg)->rtmsg_info));
+ ret |= __get_user (r6.rtmsg_flags, &(((struct in6_rtmsg32 *)arg)->rtmsg_flags));
+ ret |= __get_user (r6.rtmsg_ifindex, &(((struct in6_rtmsg32 *)arg)->rtmsg_ifindex));
+
+ r = (void *) &r6;
+ } else { /* ipv4 */
+ ret = copy_from_user (&r4.rt_dst, &(((struct rtentry32 *)arg)->rt_dst), 3 * sizeof(struct sockaddr));
+ ret |= __get_user (r4.rt_flags, &(((struct rtentry32 *)arg)->rt_flags));
+ ret |= __get_user (r4.rt_metric, &(((struct rtentry32 *)arg)->rt_metric));
+ ret |= __get_user (r4.rt_mtu, &(((struct rtentry32 *)arg)->rt_mtu));
+ ret |= __get_user (r4.rt_window, &(((struct rtentry32 *)arg)->rt_window));
+ ret |= __get_user (r4.rt_irtt, &(((struct rtentry32 *)arg)->rt_irtt));
+ ret |= __get_user (rtdev, &(((struct rtentry32 *)arg)->rt_dev));
+ if (rtdev) {
+ ret |= copy_from_user (devname, (char *)A(rtdev), 15);
+ r4.rt_dev = devname; devname[15] = 0;
+ } else
+ r4.rt_dev = 0;
+
+ r = (void *) &r4;
+ }
+
+ if (ret)
+ return -EFAULT;
+
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, cmd, (long) r);
+ set_fs (old_fs);
+
+ return ret;
+}
+
+/* 32-bit layout of struct hd_geometry; start shrinks to u32. */
+struct hd_geometry32 {
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ u32 start;
+};
+
+/*
+ * hdio_getgeo - HDIO_GETGEO: run the native ioctl into a kernel
+ * hd_geometry, then copy the first 4 bytes (heads, sectors, cylinders
+ * -- identical layout in both structs) plus the narrowed start field.
+ */
+static int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ struct hd_geometry geo;
+ int err;
+
+ set_fs (KERNEL_DS);
+ err = sys_ioctl(fd, HDIO_GETGEO, (unsigned long)&geo);
+ set_fs (old_fs);
+ if (!err) {
+ err = copy_to_user ((struct hd_geometry32 *)arg, &geo, 4);
+ err |= __put_user (geo.start, &(((struct hd_geometry32 *)arg)->start));
+ }
+ return err ? -EFAULT : 0;
+}
+
+
+/*
+ * hdio_ioctl_trans - generic translator for HDIO ioctls that return a
+ * single value: the native ioctl fills a kernel long, and the low 32
+ * bits are stored to the user's unsigned int.
+ */
+static int hdio_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ unsigned long kval;
+ unsigned int *uvp;
+ int error;
+
+ set_fs(KERNEL_DS);
+ error = sys_ioctl(fd, cmd, (long)&kval);
+ set_fs(old_fs);
+
+ if (error == 0) {
+ uvp = (unsigned int *)arg;
+ if (put_user(kval, uvp))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/* 32-bit mirrors of the floppy driver's ioctl structures; u32 replaces
+ * pointer and unsigned-long members, everything else is unchanged. */
+struct floppy_struct32 {
+ unsigned int size;
+ unsigned int sect;
+ unsigned int head;
+ unsigned int track;
+ unsigned int stretch;
+ unsigned char gap;
+ unsigned char rate;
+ unsigned char spec1;
+ unsigned char fmt_gap;
+ const __kernel_caddr_t32 name;
+};
+
+struct floppy_drive_params32 {
+ char cmos;
+ u32 max_dtr;
+ u32 hlt;
+ u32 hut;
+ u32 srt;
+ u32 spinup;
+ u32 spindown;
+ unsigned char spindown_offset;
+ unsigned char select_delay;
+ unsigned char rps;
+ unsigned char tracks;
+ u32 timeout;
+ unsigned char interleave_sect;
+ struct floppy_max_errors max_errors;
+ char flags;
+ char read_track;
+ short autodetect[8];
+ int checkfreq;
+ int native_format;
+};
+
+struct floppy_drive_struct32 {
+ signed char flags;
+ u32 spinup_date;
+ u32 select_date;
+ u32 first_read_date;
+ short probed_format;
+ short track;
+ short maxblock;
+ short maxtrack;
+ int generation;
+ int keep_data;
+ int fd_ref;
+ int fd_device;
+ int last_checked;
+ __kernel_caddr_t32 dmabuf;
+ int bufblocks;
+};
+
+struct floppy_fdc_state32 {
+ int spec1;
+ int spec2;
+ int dtr;
+ unsigned char version;
+ unsigned char dor;
+ u32 address;
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version;
+ unsigned char track[4];
+};
+
+struct floppy_write_errors32 {
+ unsigned int write_errors;
+ u32 first_error_sector;
+ int first_error_generation;
+ u32 last_error_sector;
+ int last_error_generation;
+ unsigned int badness;
+};
+
+/* 32-bit floppy ioctl numbers (same codes, sized by the 32-bit structs)
+ * and the table mapping each one to its native command. */
+#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
+#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
+#define FDGETPRM32 _IOR(2, 0x04, struct floppy_struct32)
+#define FDSETDRVPRM32 _IOW(2, 0x90, struct floppy_drive_params32)
+#define FDGETDRVPRM32 _IOR(2, 0x11, struct floppy_drive_params32)
+#define FDGETDRVSTAT32 _IOR(2, 0x12, struct floppy_drive_struct32)
+#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct floppy_drive_struct32)
+#define FDGETFDCSTAT32 _IOR(2, 0x15, struct floppy_fdc_state32)
+#define FDWERRORGET32 _IOR(2, 0x17, struct floppy_write_errors32)
+
+static struct {
+ unsigned int cmd32;
+ unsigned int cmd;
+} fd_ioctl_trans_table[] = {
+ { FDSETPRM32, FDSETPRM },
+ { FDDEFPRM32, FDDEFPRM },
+ { FDGETPRM32, FDGETPRM },
+ { FDSETDRVPRM32, FDSETDRVPRM },
+ { FDGETDRVPRM32, FDGETDRVPRM },
+ { FDGETDRVSTAT32, FDGETDRVSTAT },
+ { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
+ { FDGETFDCSTAT32, FDGETFDCSTAT },
+ { FDWERRORGET32, FDWERRORGET }
+};
+
+#define NR_FD_IOCTL_TRANS (sizeof(fd_ioctl_trans_table)/sizeof(fd_ioctl_trans_table[0]))
+
+/*
+ * Translate a 32-bit floppy ioctl into the native one.
+ *
+ * Strategy: look up the native command in fd_ioctl_trans_table, kmalloc a
+ * native-sized argument buffer, unpack write-direction fields from the
+ * 32-bit user struct, issue the native ioctl under KERNEL_DS so sys_ioctl
+ * accepts the kernel pointer, then repack read-direction results into the
+ * caller's 32-bit struct.  Returns 0, the ioctl's error, -EINVAL for an
+ * unknown cmd, -ENOMEM or -EFAULT.
+ */
+static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	void *karg = NULL;
+	unsigned int kcmd = 0;
+	int i, err;
+
+	/* Resolve the native command number; unknown cmds leave kcmd == 0. */
+	for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
+		if (cmd == fd_ioctl_trans_table[i].cmd32) {
+			kcmd = fd_ioctl_trans_table[i].cmd;
+			break;
+		}
+	if (!kcmd)
+		return -EINVAL;
+
+	/* Allocate the native argument and, for SET commands, unpack it. */
+	switch (cmd) {
+	case FDSETPRM32:
+	case FDDEFPRM32:
+	case FDGETPRM32:
+	{
+		struct floppy_struct *f;
+
+		f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
+		if (!karg)
+			return -ENOMEM;
+		/* GET direction: the native ioctl fills the buffer, no unpack. */
+		if (cmd == FDGETPRM32)
+			break;
+		err = __get_user(f->size, &((struct floppy_struct32 *)arg)->size);
+		err |= __get_user(f->sect, &((struct floppy_struct32 *)arg)->sect);
+		err |= __get_user(f->head, &((struct floppy_struct32 *)arg)->head);
+		err |= __get_user(f->track, &((struct floppy_struct32 *)arg)->track);
+		err |= __get_user(f->stretch, &((struct floppy_struct32 *)arg)->stretch);
+		err |= __get_user(f->gap, &((struct floppy_struct32 *)arg)->gap);
+		err |= __get_user(f->rate, &((struct floppy_struct32 *)arg)->rate);
+		err |= __get_user(f->spec1, &((struct floppy_struct32 *)arg)->spec1);
+		err |= __get_user(f->fmt_gap, &((struct floppy_struct32 *)arg)->fmt_gap);
+		/* Widen the 32-bit name pointer; the (u64) lvalue-cast relies on
+		 * a gcc extension — NOTE(review): verify this compiles on the
+		 * target toolchain. */
+		err |= __get_user((u64)f->name, &((struct floppy_struct32 *)arg)->name);
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+		break;
+	}
+	case FDSETDRVPRM32:
+	case FDGETDRVPRM32:
+	{
+		struct floppy_drive_params *f;
+
+		f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
+		if (!karg)
+			return -ENOMEM;
+		if (cmd == FDGETDRVPRM32)
+			break;
+		err = __get_user(f->cmos, &((struct floppy_drive_params32 *)arg)->cmos);
+		err |= __get_user(f->max_dtr, &((struct floppy_drive_params32 *)arg)->max_dtr);
+		err |= __get_user(f->hlt, &((struct floppy_drive_params32 *)arg)->hlt);
+		err |= __get_user(f->hut, &((struct floppy_drive_params32 *)arg)->hut);
+		err |= __get_user(f->srt, &((struct floppy_drive_params32 *)arg)->srt);
+		err |= __get_user(f->spinup, &((struct floppy_drive_params32 *)arg)->spinup);
+		err |= __get_user(f->spindown, &((struct floppy_drive_params32 *)arg)->spindown);
+		err |= __get_user(f->spindown_offset, &((struct floppy_drive_params32 *)arg)->spindown_offset);
+		err |= __get_user(f->select_delay, &((struct floppy_drive_params32 *)arg)->select_delay);
+		err |= __get_user(f->rps, &((struct floppy_drive_params32 *)arg)->rps);
+		err |= __get_user(f->tracks, &((struct floppy_drive_params32 *)arg)->tracks);
+		err |= __get_user(f->timeout, &((struct floppy_drive_params32 *)arg)->timeout);
+		err |= __get_user(f->interleave_sect, &((struct floppy_drive_params32 *)arg)->interleave_sect);
+		/* Raw byte copy of the embedded max_errors struct — assumes its
+		 * layout is identical in both ABIs; TODO confirm. */
+		err |= __copy_from_user(&f->max_errors, &((struct floppy_drive_params32 *)arg)->max_errors, sizeof(f->max_errors));
+		err |= __get_user(f->flags, &((struct floppy_drive_params32 *)arg)->flags);
+		err |= __get_user(f->read_track, &((struct floppy_drive_params32 *)arg)->read_track);
+		err |= __copy_from_user(f->autodetect, ((struct floppy_drive_params32 *)arg)->autodetect, sizeof(f->autodetect));
+		err |= __get_user(f->checkfreq, &((struct floppy_drive_params32 *)arg)->checkfreq);
+		err |= __get_user(f->native_format, &((struct floppy_drive_params32 *)arg)->native_format);
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+		break;
+	}
+	/* The remaining commands are read-only: just allocate the buffer. */
+	case FDGETDRVSTAT32:
+	case FDPOLLDRVSTAT32:
+		karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
+		if (!karg)
+			return -ENOMEM;
+		break;
+	case FDGETFDCSTAT32:
+		karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
+		if (!karg)
+			return -ENOMEM;
+		break;
+	case FDWERRORGET32:
+		karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
+		if (!karg)
+			return -ENOMEM;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Issue the native ioctl with a kernel pointer under KERNEL_DS. */
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, kcmd, (unsigned long)karg);
+	set_fs (old_fs);
+	if (err)
+		goto out;
+	/* Repack GET-direction results into the 32-bit user struct. */
+	switch (cmd) {
+	case FDGETPRM32:
+	{
+		struct floppy_struct *f = karg;
+
+		err = __put_user(f->size, &((struct floppy_struct32 *)arg)->size);
+		err |= __put_user(f->sect, &((struct floppy_struct32 *)arg)->sect);
+		err |= __put_user(f->head, &((struct floppy_struct32 *)arg)->head);
+		err |= __put_user(f->track, &((struct floppy_struct32 *)arg)->track);
+		err |= __put_user(f->stretch, &((struct floppy_struct32 *)arg)->stretch);
+		err |= __put_user(f->gap, &((struct floppy_struct32 *)arg)->gap);
+		err |= __put_user(f->rate, &((struct floppy_struct32 *)arg)->rate);
+		err |= __put_user(f->spec1, &((struct floppy_struct32 *)arg)->spec1);
+		err |= __put_user(f->fmt_gap, &((struct floppy_struct32 *)arg)->fmt_gap);
+		/* Truncates the kernel pointer to 32 bits for the user field. */
+		err |= __put_user((u64)f->name, &((struct floppy_struct32 *)arg)->name);
+		break;
+	}
+	case FDGETDRVPRM32:
+	{
+		struct floppy_drive_params *f = karg;
+
+		err = __put_user(f->cmos, &((struct floppy_drive_params32 *)arg)->cmos);
+		err |= __put_user(f->max_dtr, &((struct floppy_drive_params32 *)arg)->max_dtr);
+		err |= __put_user(f->hlt, &((struct floppy_drive_params32 *)arg)->hlt);
+		err |= __put_user(f->hut, &((struct floppy_drive_params32 *)arg)->hut);
+		err |= __put_user(f->srt, &((struct floppy_drive_params32 *)arg)->srt);
+		err |= __put_user(f->spinup, &((struct floppy_drive_params32 *)arg)->spinup);
+		err |= __put_user(f->spindown, &((struct floppy_drive_params32 *)arg)->spindown);
+		err |= __put_user(f->spindown_offset, &((struct floppy_drive_params32 *)arg)->spindown_offset);
+		err |= __put_user(f->select_delay, &((struct floppy_drive_params32 *)arg)->select_delay);
+		err |= __put_user(f->rps, &((struct floppy_drive_params32 *)arg)->rps);
+		err |= __put_user(f->tracks, &((struct floppy_drive_params32 *)arg)->tracks);
+		err |= __put_user(f->timeout, &((struct floppy_drive_params32 *)arg)->timeout);
+		err |= __put_user(f->interleave_sect, &((struct floppy_drive_params32 *)arg)->interleave_sect);
+		err |= __copy_to_user(&((struct floppy_drive_params32 *)arg)->max_errors, &f->max_errors, sizeof(f->max_errors));
+		err |= __put_user(f->flags, &((struct floppy_drive_params32 *)arg)->flags);
+		err |= __put_user(f->read_track, &((struct floppy_drive_params32 *)arg)->read_track);
+		err |= __copy_to_user(((struct floppy_drive_params32 *)arg)->autodetect, f->autodetect, sizeof(f->autodetect));
+		err |= __put_user(f->checkfreq, &((struct floppy_drive_params32 *)arg)->checkfreq);
+		err |= __put_user(f->native_format, &((struct floppy_drive_params32 *)arg)->native_format);
+		break;
+	}
+	case FDGETDRVSTAT32:
+	case FDPOLLDRVSTAT32:
+	{
+		struct floppy_drive_struct *f = karg;
+
+		err = __put_user(f->flags, &((struct floppy_drive_struct32 *)arg)->flags);
+		err |= __put_user(f->spinup_date, &((struct floppy_drive_struct32 *)arg)->spinup_date);
+		err |= __put_user(f->select_date, &((struct floppy_drive_struct32 *)arg)->select_date);
+		err |= __put_user(f->first_read_date, &((struct floppy_drive_struct32 *)arg)->first_read_date);
+		err |= __put_user(f->probed_format, &((struct floppy_drive_struct32 *)arg)->probed_format);
+		err |= __put_user(f->track, &((struct floppy_drive_struct32 *)arg)->track);
+		err |= __put_user(f->maxblock, &((struct floppy_drive_struct32 *)arg)->maxblock);
+		err |= __put_user(f->maxtrack, &((struct floppy_drive_struct32 *)arg)->maxtrack);
+		err |= __put_user(f->generation, &((struct floppy_drive_struct32 *)arg)->generation);
+		err |= __put_user(f->keep_data, &((struct floppy_drive_struct32 *)arg)->keep_data);
+		err |= __put_user(f->fd_ref, &((struct floppy_drive_struct32 *)arg)->fd_ref);
+		err |= __put_user(f->fd_device, &((struct floppy_drive_struct32 *)arg)->fd_device);
+		err |= __put_user(f->last_checked, &((struct floppy_drive_struct32 *)arg)->last_checked);
+		err |= __put_user((u64)f->dmabuf, &((struct floppy_drive_struct32 *)arg)->dmabuf);
+		err |= __put_user((u64)f->bufblocks, &((struct floppy_drive_struct32 *)arg)->bufblocks);
+		break;
+	}
+	case FDGETFDCSTAT32:
+	{
+		struct floppy_fdc_state *f = karg;
+
+		err = __put_user(f->spec1, &((struct floppy_fdc_state32 *)arg)->spec1);
+		err |= __put_user(f->spec2, &((struct floppy_fdc_state32 *)arg)->spec2);
+		err |= __put_user(f->dtr, &((struct floppy_fdc_state32 *)arg)->dtr);
+		err |= __put_user(f->version, &((struct floppy_fdc_state32 *)arg)->version);
+		err |= __put_user(f->dor, &((struct floppy_fdc_state32 *)arg)->dor);
+		err |= __put_user(f->address, &((struct floppy_fdc_state32 *)arg)->address);
+		/* Copy the bitfield word that immediately follows `address` as
+		 * raw bytes — assumes both ABIs pack those bitfields the same
+		 * way; TODO confirm. */
+		err |= __copy_to_user((char *)&((struct floppy_fdc_state32 *)arg)->address
+		    + sizeof(((struct floppy_fdc_state32 *)arg)->address),
+		    (char *)&f->address + sizeof(f->address), sizeof(int));
+		err |= __put_user(f->driver_version, &((struct floppy_fdc_state32 *)arg)->driver_version);
+		err |= __copy_to_user(((struct floppy_fdc_state32 *)arg)->track, f->track, sizeof(f->track));
+		break;
+	}
+	case FDWERRORGET32:
+	{
+		struct floppy_write_errors *f = karg;
+
+		err = __put_user(f->write_errors, &((struct floppy_write_errors32 *)arg)->write_errors);
+		err |= __put_user(f->first_error_sector, &((struct floppy_write_errors32 *)arg)->first_error_sector);
+		err |= __put_user(f->first_error_generation, &((struct floppy_write_errors32 *)arg)->first_error_generation);
+		err |= __put_user(f->last_error_sector, &((struct floppy_write_errors32 *)arg)->last_error_sector);
+		err |= __put_user(f->last_error_generation, &((struct floppy_write_errors32 *)arg)->last_error_generation);
+		err |= __put_user(f->badness, &((struct floppy_write_errors32 *)arg)->badness);
+		break;
+	}
+	default:
+		break;
+	}
+	if (err)
+		err = -EFAULT;
+
+out:	if (karg) kfree(karg);
+	return err;
+}
+
+/* 32-bit layouts for the two ppp ioctls that embed pointers/time_t. */
+struct ppp_option_data32 {
+	__kernel_caddr_t32 ptr;		/* 32-bit user pointer to option bytes */
+	__u32 length;
+	int transmit;
+};
+#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
+
+struct ppp_idle32 {
+	__kernel_time_t32 xmit_idle;
+	__kernel_time_t32 recv_idle;
+};
+#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32)
+
+/*
+ * Translate the two pointer-bearing ppp ioctls for 32-bit callers.
+ * PPPIOCGIDLE: run natively into a kernel ppp_idle, then narrow the two
+ * time_t fields.  PPPIOCSCOMPRESS: copy the 32-bit descriptor, pull the
+ * option bytes into a kernel buffer, and hand the native descriptor down.
+ */
+static int ppp_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct ppp_option_data32 data32;
+	struct ppp_option_data data;
+	struct ppp_idle32 idle32;
+	struct ppp_idle idle;
+	unsigned int kcmd;
+	void *karg;
+	int err = 0;
+
+	switch (cmd) {
+	case PPPIOCGIDLE32:
+		kcmd = PPPIOCGIDLE;
+		karg = &idle;
+		break;
+	case PPPIOCSCOMPRESS32:
+		if (copy_from_user(&data32, (struct ppp_option_data32 *)arg, sizeof(struct ppp_option_data32)))
+			return -EFAULT;
+		/* NOTE(review): data32.length is user-controlled and unbounded
+		 * before this kmalloc — consider a sanity limit. */
+		data.ptr = kmalloc (data32.length, GFP_KERNEL);
+		if (!data.ptr)
+			return -ENOMEM;
+		if (copy_from_user(data.ptr, (__u8 *)A(data32.ptr), data32.length)) {
+			kfree(data.ptr);
+			return -EFAULT;
+		}
+		data.length = data32.length;
+		data.transmit = data32.transmit;
+		kcmd = PPPIOCSCOMPRESS;
+		karg = &data;
+		break;
+	default:
+		/* Rate-limited diagnostic for unexpected commands. */
+		do {
+			static int count = 0;
+			if (++count <= 20)
+				printk("ppp_ioctl: Unknown cmd fd(%d) "
+				       "cmd(%08x) arg(%08x)\n",
+				       (int)fd, (unsigned int)cmd, (unsigned int)arg);
+		} while (0);
+		return -EINVAL;
+	}
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, kcmd, (unsigned long)karg);
+	set_fs (old_fs);
+	switch (cmd) {
+	case PPPIOCGIDLE32:
+		if (err)
+			return err;
+		/* Narrow native time_t values to the 32-bit ABI. */
+		idle32.xmit_idle = idle.xmit_idle;
+		idle32.recv_idle = idle.recv_idle;
+		if (copy_to_user((struct ppp_idle32 *)arg, &idle32, sizeof(struct ppp_idle32)))
+			return -EFAULT;
+		break;
+	case PPPIOCSCOMPRESS32:
+		/* Free the bounce buffer whether or not the ioctl succeeded. */
+		kfree(data.ptr);
+		break;
+	default:
+		break;
+	}
+	return err;
+}
+
+
+/* 32-bit layouts for the magnetic-tape (mtio) ioctl payloads. */
+struct mtget32 {
+	__u32 mt_type;
+	__u32 mt_resid;
+	__u32 mt_dsreg;
+	__u32 mt_gstat;
+	__u32 mt_erreg;
+	__kernel_daddr_t32 mt_fileno;
+	__kernel_daddr_t32 mt_blkno;
+};
+#define MTIOCGET32 _IOR('m', 2, struct mtget32)
+
+struct mtpos32 {
+	__u32 mt_blkno;
+};
+#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
+
+struct mtconfiginfo32 {
+	__u32 mt_type;
+	__u32 ifc_type;
+	__u16 irqnr;
+	__u16 dmanr;
+	__u16 port;
+	__u32 debug;
+	/* The have_* flags below pack into the single 32-bit word that
+	 * mt_ioctl_trans() copies as raw bytes. */
+	__u32 have_dens:1;
+	__u32 have_bsf:1;
+	__u32 have_fsr:1;
+	__u32 have_bsr:1;
+	__u32 have_eod:1;
+	__u32 have_seek:1;
+	__u32 have_tell:1;
+	__u32 have_ras1:1;
+	__u32 have_ras2:1;
+	__u32 have_ras3:1;
+	__u32 have_qfa:1;
+	__u32 pad1:5;
+	char reserved[10];
+};
+#define MTIOCGETCONFIG32 _IOR('m', 4, struct mtconfiginfo32)
+#define MTIOCSETCONFIG32 _IOW('m', 5, struct mtconfiginfo32)
+
+/*
+ * Translate 32-bit mtio ioctls: stack-allocate the native payload, unpack
+ * it for MTIOCSETCONFIG, run the native ioctl under KERNEL_DS, and repack
+ * results for the read-direction commands.
+ */
+static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct mtconfiginfo info;
+	struct mtget get;
+	struct mtpos pos;
+	unsigned long kcmd;
+	void *karg;
+	int err = 0;
+
+	switch(cmd) {
+	case MTIOCPOS32:
+		kcmd = MTIOCPOS;
+		karg = &pos;
+		break;
+	case MTIOCGET32:
+		kcmd = MTIOCGET;
+		karg = &get;
+		break;
+	case MTIOCGETCONFIG32:
+		kcmd = MTIOCGETCONFIG;
+		karg = &info;
+		break;
+	case MTIOCSETCONFIG32:
+		kcmd = MTIOCSETCONFIG;
+		karg = &info;
+		err = __get_user(info.mt_type, &((struct mtconfiginfo32 *)arg)->mt_type);
+		err |= __get_user(info.ifc_type, &((struct mtconfiginfo32 *)arg)->ifc_type);
+		err |= __get_user(info.irqnr, &((struct mtconfiginfo32 *)arg)->irqnr);
+		err |= __get_user(info.dmanr, &((struct mtconfiginfo32 *)arg)->dmanr);
+		err |= __get_user(info.port, &((struct mtconfiginfo32 *)arg)->port);
+		err |= __get_user(info.debug, &((struct mtconfiginfo32 *)arg)->debug);
+		/* Copy the have_* bitfield word that follows `debug` as raw
+		 * bytes — assumes identical bit packing in both ABIs; TODO
+		 * confirm. */
+		err |= __copy_from_user((char *)&info.debug + sizeof(info.debug),
+				        (char *)&((struct mtconfiginfo32 *)arg)->debug
+				        + sizeof(((struct mtconfiginfo32 *)arg)->debug), sizeof(__u32));
+		if (err)
+			return -EFAULT;
+		break;
+	default:
+		/* Rate-limited diagnostic for unexpected commands. */
+		do {
+			static int count = 0;
+			if (++count <= 20)
+				printk("mt_ioctl: Unknown cmd fd(%d) "
+				       "cmd(%08x) arg(%08x)\n",
+				       (int)fd, (unsigned int)cmd, (unsigned int)arg);
+		} while (0);
+		return -EINVAL;
+	}
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, kcmd, (unsigned long)karg);
+	set_fs (old_fs);
+	if (err)
+		return err;
+	switch (cmd) {
+	case MTIOCPOS32:
+		err = __put_user(pos.mt_blkno, &((struct mtpos32 *)arg)->mt_blkno);
+		break;
+	case MTIOCGET32:
+		err = __put_user(get.mt_type, &((struct mtget32 *)arg)->mt_type);
+		err |= __put_user(get.mt_resid, &((struct mtget32 *)arg)->mt_resid);
+		err |= __put_user(get.mt_dsreg, &((struct mtget32 *)arg)->mt_dsreg);
+		err |= __put_user(get.mt_gstat, &((struct mtget32 *)arg)->mt_gstat);
+		err |= __put_user(get.mt_erreg, &((struct mtget32 *)arg)->mt_erreg);
+		err |= __put_user(get.mt_fileno, &((struct mtget32 *)arg)->mt_fileno);
+		err |= __put_user(get.mt_blkno, &((struct mtget32 *)arg)->mt_blkno);
+		break;
+	case MTIOCGETCONFIG32:
+		err = __put_user(info.mt_type, &((struct mtconfiginfo32 *)arg)->mt_type);
+		err |= __put_user(info.ifc_type, &((struct mtconfiginfo32 *)arg)->ifc_type);
+		err |= __put_user(info.irqnr, &((struct mtconfiginfo32 *)arg)->irqnr);
+		err |= __put_user(info.dmanr, &((struct mtconfiginfo32 *)arg)->dmanr);
+		err |= __put_user(info.port, &((struct mtconfiginfo32 *)arg)->port);
+		err |= __put_user(info.debug, &((struct mtconfiginfo32 *)arg)->debug);
+		err |= __copy_to_user((char *)&((struct mtconfiginfo32 *)arg)->debug
+			    	      + sizeof(((struct mtconfiginfo32 *)arg)->debug),
+				      (char *)&info.debug + sizeof(info.debug), sizeof(__u32));
+		break;
+	case MTIOCSETCONFIG32:
+		break;
+	}
+	return err ? -EFAULT: 0;
+}
+
+/* 32-bit layouts for the pointer-bearing cdrom ioctl payloads. */
+struct cdrom_read32 {
+	int cdread_lba;
+	__kernel_caddr_t32 cdread_bufaddr;	/* 32-bit user buffer pointer */
+	int cdread_buflen;
+};
+
+struct cdrom_read_audio32 {
+	union cdrom_addr addr;
+	u_char addr_format;
+	int nframes;
+	__kernel_caddr_t32 buf;			/* 32-bit user buffer pointer */
+};
+
+struct cdrom_generic_command32 {
+	unsigned char cmd[CDROM_PACKET_SIZE];
+	__kernel_caddr_t32 buffer;
+	unsigned int buflen;
+	int stat;
+	__kernel_caddr_t32 sense;
+	__kernel_caddr_t32 reserved[3];
+};
+
+/*
+ * Translate the cdrom ioctls whose payloads carry user pointers.  A kernel
+ * bounce buffer stands in for the 32-bit user buffer; after the native
+ * ioctl the bounce buffer is copied back to the saved 32-bit address.
+ */
+static int cdrom_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct cdrom_read cdread;
+	struct cdrom_read_audio cdreadaudio;
+	struct cdrom_generic_command cgc;
+	__kernel_caddr_t32 addr;	/* saved 32-bit user buffer address */
+	char *data = 0;
+	void *karg;
+	int err = 0;
+
+	switch(cmd) {
+	case CDROMREADMODE2:
+	case CDROMREADMODE1:
+	case CDROMREADRAW:
+	case CDROMREADCOOKED:
+		karg = &cdread;
+		err = __get_user(cdread.cdread_lba, &((struct cdrom_read32 *)arg)->cdread_lba);
+		err |= __get_user(addr, &((struct cdrom_read32 *)arg)->cdread_bufaddr);
+		err |= __get_user(cdread.cdread_buflen, &((struct cdrom_read32 *)arg)->cdread_buflen);
+		if (err)
+			return -EFAULT;
+		/* NOTE(review): cdread_buflen is user-controlled and unbounded
+		 * before this kmalloc — consider a sanity limit. */
+		data = kmalloc(cdread.cdread_buflen, GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+		cdread.cdread_bufaddr = data;
+		break;
+	case CDROMREADAUDIO:
+		karg = &cdreadaudio;
+		err = copy_from_user(&cdreadaudio.addr, &((struct cdrom_read_audio32 *)arg)->addr, sizeof(cdreadaudio.addr));
+		err |= __get_user(cdreadaudio.addr_format, &((struct cdrom_read_audio32 *)arg)->addr_format);
+		err |= __get_user(cdreadaudio.nframes, &((struct cdrom_read_audio32 *)arg)->nframes);
+		err |= __get_user(addr, &((struct cdrom_read_audio32 *)arg)->buf);
+		if (err)
+			return -EFAULT;
+		/* 2352 = bytes per CD audio frame. nframes is user-controlled;
+		 * NOTE(review): no upper bound before kmalloc. */
+		data = kmalloc(cdreadaudio.nframes * 2352, GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+		cdreadaudio.buf = data;
+		break;
+	case CDROM_SEND_PACKET:
+		karg = &cgc;
+		err = copy_from_user(cgc.cmd, &((struct cdrom_generic_command32 *)arg)->cmd, sizeof(cgc.cmd));
+		err |= __get_user(addr, &((struct cdrom_generic_command32 *)arg)->buffer);
+		err |= __get_user(cgc.buflen, &((struct cdrom_generic_command32 *)arg)->buflen);
+		if (err)
+			return -EFAULT;
+		/* NOTE(review): the user's buffer contents are never copied IN
+		 * before the command is issued, so write-direction packets see
+		 * an uninitialized bounce buffer — confirm this is intended. */
+		if ((data = kmalloc(cgc.buflen, GFP_KERNEL)) == NULL)
+			return -ENOMEM;
+		cgc.buffer = data;
+		break;
+	default:
+		/* Rate-limited diagnostic for unexpected commands. */
+		do {
+			static int count = 0;
+			if (++count <= 20)
+				printk("cdrom_ioctl: Unknown cmd fd(%d) "
+				       "cmd(%08x) arg(%08x)\n",
+				       (int)fd, (unsigned int)cmd, (unsigned int)arg);
+		} while (0);
+		return -EINVAL;
+	}
+	set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, cmd, (unsigned long)karg);
+	set_fs (old_fs);
+	if (err)
+		goto out;
+	/* Copy the bounce buffer back to the saved 32-bit user address. */
+	switch (cmd) {
+	case CDROMREADMODE2:
+	case CDROMREADMODE1:
+	case CDROMREADRAW:
+	case CDROMREADCOOKED:
+		err = copy_to_user((char *)A(addr), data, cdread.cdread_buflen);
+		break;
+	case CDROMREADAUDIO:
+		err = copy_to_user((char *)A(addr), data, cdreadaudio.nframes * 2352);
+		break;
+	case CDROM_SEND_PACKET:
+		err = copy_to_user((char *)A(addr), data, cgc.buflen);
+		break;
+	default:
+		break;
+	}
+out:	if (data)
+		kfree(data);
+	return err ? -EFAULT : 0;
+}
+
+/* 32-bit layout of struct loop_info (LOOP_SET_STATUS/LOOP_GET_STATUS). */
+struct loop_info32 {
+	int lo_number; /* ioctl r/o */
+	__kernel_dev_t32 lo_device; /* ioctl r/o */
+	unsigned int lo_inode; /* ioctl r/o */
+	__kernel_dev_t32 lo_rdevice; /* ioctl r/o */
+	int lo_offset;
+	int lo_encrypt_type;
+	int lo_encrypt_key_size; /* ioctl w/o */
+	int lo_flags; /* ioctl r/o */
+	char lo_name[LO_NAME_SIZE];
+	unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
+	unsigned int lo_init[2];
+	char reserved[4];
+};
+
+/*
+ * Translate LOOP_SET_STATUS / LOOP_GET_STATUS.  The first four fields are
+ * converted individually; everything from lo_offset onward is copied as a
+ * single byte range whose length is computed from the *native* struct's
+ * member offsets.
+ */
+static int loop_status(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct loop_info l;
+	int err = -EINVAL;
+
+	switch(cmd) {
+	case LOOP_SET_STATUS:
+		err = get_user(l.lo_number, &((struct loop_info32 *)arg)->lo_number);
+		err |= __get_user(l.lo_device, &((struct loop_info32 *)arg)->lo_device);
+		err |= __get_user(l.lo_inode, &((struct loop_info32 *)arg)->lo_inode);
+		err |= __get_user(l.lo_rdevice, &((struct loop_info32 *)arg)->lo_rdevice);
+		/* Bulk copy lo_offset..lo_init[1] (the "+ 8" covers lo_init's
+		 * two ints).  NOTE(review): the length comes from native
+		 * struct offsets but the source is the 32-bit layout — only
+		 * valid if the two layouts agree over this range; confirm. */
+		err |= __copy_from_user((char *)&l.lo_offset, (char *)&((struct loop_info32 *)arg)->lo_offset,
+					8 + (unsigned long)l.lo_init - (unsigned long)&l.lo_offset);
+		if (err) {
+			err = -EFAULT;
+		} else {
+			set_fs (KERNEL_DS);
+			err = sys_ioctl (fd, cmd, (unsigned long)&l);
+			set_fs (old_fs);
+		}
+		break;
+	case LOOP_GET_STATUS:
+		set_fs (KERNEL_DS);
+		err = sys_ioctl (fd, cmd, (unsigned long)&l);
+		set_fs (old_fs);
+		if (!err) {
+			err = put_user(l.lo_number, &((struct loop_info32 *)arg)->lo_number);
+			err |= __put_user(l.lo_device, &((struct loop_info32 *)arg)->lo_device);
+			err |= __put_user(l.lo_inode, &((struct loop_info32 *)arg)->lo_inode);
+			err |= __put_user(l.lo_rdevice, &((struct loop_info32 *)arg)->lo_rdevice);
+			err |= __copy_to_user((char *)&((struct loop_info32 *)arg)->lo_offset,
+					   (char *)&l.lo_offset, (unsigned long)l.lo_init - (unsigned long)&l.lo_offset);
+			if (err)
+				err = -EFAULT;
+		}
+		break;
+	default: {
+		static int count = 0;
+		if (++count <= 20)
+			printk("%s: Unknown loop ioctl cmd, fd(%d) "
+			       "cmd(%08x) arg(%08lx)\n",
+			       __FUNCTION__, fd, cmd, arg);
+	}
+	}
+	return err;
+}
+
+extern int tty_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_VT
+/*
+ * Verify that `file` really is a VT tty and decide permission for the vt
+ * ioctls.  Returns -EINVAL if the file is not a tty whose driver ioctl is
+ * vt_ioctl, 1 if the caller owns the tty or is superuser, 0 otherwise.
+ */
+static int vt_check(struct file *file)
+{
+	struct tty_struct *tty;
+	struct inode *inode = file->f_dentry->d_inode;
+
+	/* Must be a tty file at all... */
+	if (file->f_op->ioctl != tty_ioctl)
+		return -EINVAL;
+
+	tty = (struct tty_struct *)file->private_data;
+	if (tty_paranoia_check(tty, inode->i_rdev, "tty_ioctl"))
+		return -EINVAL;
+
+	/* ...and specifically a virtual terminal. */
+	if (tty->driver.ioctl != vt_ioctl)
+		return -EINVAL;
+
+	/*
+	 * To have permissions to do most of the vt ioctls, we either have
+	 * to be the owner of the tty, or super-user.
+	 */
+	if (current->tty == tty || suser())
+		return 1;
+	return 0;
+}
+
+/* 32-bit layout of struct consolefontdesc (PIO_FONTX/GIO_FONTX). */
+struct consolefontdesc32 {
+	unsigned short charcount;	/* characters in font (256 or 512) */
+	unsigned short charheight;	/* scan lines per character (1-32) */
+	u32 chardata;			/* font data in expanded form */
+};
+
+/*
+ * Handle PIO_FONTX/GIO_FONTX from a 32-bit process: copy the 32-bit
+ * descriptor, widen its chardata pointer in place, and drive the console
+ * font through con_font_op() instead of re-issuing the ioctl.
+ */
+static int do_fontx_ioctl(unsigned int fd, int cmd, struct consolefontdesc32 *user_cfd, struct file *file)
+{
+	struct consolefontdesc cfdarg;
+	struct console_font_op op;
+	int i, perm;
+
+	perm = vt_check(file);
+	if (perm < 0) return perm;
+
+	/* Only sizeof the 32-bit struct is read from userspace. */
+	if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc32)))
+		return -EFAULT;
+
+	/* Reinterpret the 32-bit chardata field as a full kernel pointer. */
+	cfdarg.chardata = (unsigned char *)A(((struct consolefontdesc32 *)&cfdarg)->chardata);
+
+	switch (cmd) {
+	case PIO_FONTX:
+		if (!perm)
+			return -EPERM;
+		op.op = KD_FONT_OP_SET;
+		op.flags = 0;
+		op.width = 8;
+		op.height = cfdarg.charheight;
+		op.charcount = cfdarg.charcount;
+		op.data = cfdarg.chardata;
+		return con_font_op(fg_console, &op);
+	case GIO_FONTX:
+		if (!cfdarg.chardata)
+			return 0;
+		op.op = KD_FONT_OP_GET;
+		op.flags = 0;
+		op.width = 8;
+		op.height = cfdarg.charheight;
+		op.charcount = cfdarg.charcount;
+		op.data = cfdarg.chardata;
+		i = con_font_op(fg_console, &op);
+		if (i)
+			return i;
+		cfdarg.charheight = op.height;
+		cfdarg.charcount = op.charcount;
+		/* Narrow the pointer back to 32 bits before copying out. */
+		((struct consolefontdesc32 *)&cfdarg)->chardata = (unsigned long)cfdarg.chardata;
+		if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc32)))
+			return -EFAULT;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/* 32-bit layout of struct console_font_op (KDFONTOP). */
+struct console_font_op32 {
+	unsigned int op;		/* operation code KD_FONT_OP_* */
+	unsigned int flags;		/* KD_FONT_FLAG_* */
+	unsigned int width, height;	/* font size */
+	unsigned int charcount;
+	u32 data;			/* font data with height fixed to 32 */
+};
+
+/*
+ * Handle KDFONTOP for a 32-bit caller: read the 32-bit op struct, widen
+ * its data pointer in place, and call con_font_op() on the caller's own
+ * console (not fg_console), then narrow the pointer and copy back.
+ */
+static int do_kdfontop_ioctl(unsigned int fd, unsigned int cmd, struct console_font_op32 *fontop, struct file *file)
+{
+	struct console_font_op op;
+	int perm = vt_check(file), i;
+	struct vt_struct *vt;
+
+	if (perm < 0) return perm;
+
+	/* Only sizeof the 32-bit struct is read from userspace. */
+	if (copy_from_user(&op, (void *) fontop, sizeof(struct console_font_op32)))
+		return -EFAULT;
+	/* Reading the font is allowed without tty ownership. */
+	if (!perm && op.op != KD_FONT_OP_GET)
+		return -EPERM;
+	op.data = (unsigned char *)A(((struct console_font_op32 *)&op)->data);
+	op.flags |= KD_FONT_FLAG_OLD;
+	vt = (struct vt_struct *)((struct tty_struct *)file->private_data)->driver_data;
+	i = con_font_op(vt->vc_num, &op);
+	if (i) return i;
+	/* Narrow the data pointer back to 32 bits before copying out. */
+	((struct console_font_op32 *)&op)->data = (unsigned long)op.data;
+	if (copy_to_user((void *) fontop, &op, sizeof(struct console_font_op32)))
+		return -EFAULT;
+	return 0;
+}
+
+/* 32-bit layout of struct fb_fix_screeninfo (FBIOGET_FSCREENINFO). */
+struct fb_fix_screeninfo32 {
+	char id[16];			/* identification string eg "TT Builtin" */
+	unsigned int smem_start;	/* Start of frame buffer mem */
+					/* (physical address) */
+	__u32 smem_len;			/* Length of frame buffer mem */
+	__u32 type;			/* see FB_TYPE_* */
+	__u32 type_aux;			/* Interleave for interleaved Planes */
+	__u32 visual;			/* see FB_VISUAL_* */
+	__u16 xpanstep;			/* zero if no hardware panning */
+	__u16 ypanstep;			/* zero if no hardware panning */
+	__u16 ywrapstep;		/* zero if no hardware ywrap */
+	__u32 line_length;		/* length of a line in bytes */
+	unsigned int mmio_start;	/* Start of Memory Mapped I/O */
+					/* (physical address) */
+	__u32 mmio_len;			/* Length of Memory Mapped I/O */
+	__u32 accel;			/* Type of acceleration available */
+	__u16 reserved[3];		/* Reserved for future compatibility */
+};
+
+/*
+ * FBIOGET_FSCREENINFO for a 32-bit caller: run natively into a kernel
+ * struct, then repack it field by field, truncating the two physical
+ * addresses to 32 bits.
+ */
+static int do_fbioget_fscreeninfo_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct fb_fix_screeninfo fix;
+	int err;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (long)&fix);
+	set_fs(old_fs);
+
+	if (err == 0) {
+		unsigned int smem_start = fix.smem_start;	/* lose top 32 bits */
+		unsigned int mmio_start = fix.mmio_start;	/* lose top 32 bits */
+		int i;
+
+		/* First put_user validates the user pointer; the rest use
+		 * the unchecked __put_user variants. */
+		err = put_user(fix.id[0], &((struct fb_fix_screeninfo32 *)arg)->id[0]);
+		for (i=1; i<16; i++) {
+			err |= __put_user(fix.id[i], &((struct fb_fix_screeninfo32 *)arg)->id[i]);
+		}
+		err |= __put_user(smem_start, &((struct fb_fix_screeninfo32 *)arg)->smem_start);
+		err |= __put_user(fix.smem_len, &((struct fb_fix_screeninfo32 *)arg)->smem_len);
+		err |= __put_user(fix.type, &((struct fb_fix_screeninfo32 *)arg)->type);
+		err |= __put_user(fix.type_aux, &((struct fb_fix_screeninfo32 *)arg)->type_aux);
+		err |= __put_user(fix.visual, &((struct fb_fix_screeninfo32 *)arg)->visual);
+		err |= __put_user(fix.xpanstep, &((struct fb_fix_screeninfo32 *)arg)->xpanstep);
+		err |= __put_user(fix.ypanstep, &((struct fb_fix_screeninfo32 *)arg)->ypanstep);
+		err |= __put_user(fix.ywrapstep, &((struct fb_fix_screeninfo32 *)arg)->ywrapstep);
+		err |= __put_user(fix.line_length, &((struct fb_fix_screeninfo32 *)arg)->line_length);
+		err |= __put_user(mmio_start, &((struct fb_fix_screeninfo32 *)arg)->mmio_start);
+		err |= __put_user(fix.mmio_len, &((struct fb_fix_screeninfo32 *)arg)->mmio_len);
+		err |= __put_user(fix.accel, &((struct fb_fix_screeninfo32 *)arg)->accel);
+		err |= __put_user(fix.reserved[0], &((struct fb_fix_screeninfo32 *)arg)->reserved[0]);
+		err |= __put_user(fix.reserved[1], &((struct fb_fix_screeninfo32 *)arg)->reserved[1]);
+		err |= __put_user(fix.reserved[2], &((struct fb_fix_screeninfo32 *)arg)->reserved[2]);
+		if (err)
+			err = -EFAULT;
+	}
+	return err;
+}
+
+/* 32-bit layout of struct fb_cmap: the four component arrays become
+ * 32-bit user pointers. */
+struct fb_cmap32 {
+	__u32 start;			/* First entry */
+	__u32 len;			/* Number of entries */
+	__u32 redptr;			/* Red values */
+	__u32 greenptr;
+	__u32 blueptr;
+	__u32 transpptr;		/* transparency, can be NULL */
+};
+
+/*
+ * FBIOGETCMAP for a 32-bit caller: run natively, then narrow the four
+ * component pointers to 32 bits and write the 32-bit cmap header back.
+ * NOTE(review): `cmap` is passed to the ioctl uninitialized — this relies
+ * on the framebuffer driver treating it as pure output; confirm.
+ */
+static int do_fbiogetcmap_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct fb_cmap cmap;
+	int err;
+
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, cmd, (long)&cmap);
+	set_fs(old_fs);
+
+	if (err == 0) {
+		/* Double cast: pointer -> u64 -> truncated u32. */
+		__u32 redptr = (__u32)(__u64)cmap.red;
+		__u32 greenptr = (__u32)(__u64)cmap.green;
+		__u32 blueptr = (__u32)(__u64)cmap.blue;
+		__u32 transpptr = (__u32)(__u64)cmap.transp;
+
+		err = put_user(cmap.start, &((struct fb_cmap32 *)arg)->start);
+		err |= __put_user(cmap.len, &((struct fb_cmap32 *)arg)->len);
+		err |= __put_user(redptr, &((struct fb_cmap32 *)arg)->redptr);
+		err |= __put_user(greenptr, &((struct fb_cmap32 *)arg)->greenptr);
+		err |= __put_user(blueptr, &((struct fb_cmap32 *)arg)->blueptr);
+		err |= __put_user(transpptr, &((struct fb_cmap32 *)arg)->transpptr);
+		if (err)
+			err = -EFAULT;
+	}
+	return err;
+}
+
+/*
+ * FBIOPUTCMAP for a 32-bit caller: read the 32-bit cmap header, widen the
+ * four component pointers (still user addresses) into a native fb_cmap,
+ * and issue the native ioctl under KERNEL_DS.
+ */
+static int do_fbioputcmap_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	mm_segment_t old_fs = get_fs();
+	struct fb_cmap cmap;
+	__u32 redptr, greenptr, blueptr, transpptr;
+	int err;
+
+	err = get_user(cmap.start, &((struct fb_cmap32 *)arg)->start);
+	err |= __get_user(cmap.len, &((struct fb_cmap32 *)arg)->len);
+	err |= __get_user(redptr, &((struct fb_cmap32 *)arg)->redptr);
+	err |= __get_user(greenptr, &((struct fb_cmap32 *)arg)->greenptr);
+	err |= __get_user(blueptr, &((struct fb_cmap32 *)arg)->blueptr);
+	err |= __get_user(transpptr, &((struct fb_cmap32 *)arg)->transpptr);
+
+	if (err) {
+		err = -EFAULT;
+	} else {
+		/* Zero-extend the 32-bit user addresses to kernel pointers.
+		 * NOTE(review): the driver will then copy from these USER
+		 * addresses while under KERNEL_DS — confirm the fb layer
+		 * uses copy_from_user-style accessors here. */
+		cmap.red = (__u16 *)(__u64)redptr;
+		cmap.green = (__u16 *)(__u64)greenptr;
+		cmap.blue = (__u16 *)(__u64)blueptr;
+		cmap.transp = (__u16 *)(__u64)transpptr;
+		set_fs (KERNEL_DS);
+		err = sys_ioctl (fd, cmd, (unsigned long)&cmap);
+		set_fs (old_fs);
+	}
+	return err;
+}
+
+/* 32-bit layout of struct unimapdesc (PIO_UNIMAP/GIO_UNIMAP). */
+struct unimapdesc32 {
+	unsigned short entry_ct;
+	u32 entries;			/* 32-bit user pointer to unipairs */
+};
+
+/*
+ * PIO_UNIMAP/GIO_UNIMAP for a 32-bit caller: copy the small descriptor,
+ * widen the entries pointer via A(), and call the console unimap helpers
+ * directly.  GIO_UNIMAP passes the USER address of entry_ct straight
+ * through — con_get_unimap() is expected to write it with put_user.
+ */
+static int do_unimap_ioctl(unsigned int fd, unsigned int cmd, struct unimapdesc32 *user_ud, struct file *file)
+{
+	struct unimapdesc32 tmp;
+	int perm = vt_check(file);
+
+	if (perm < 0) return perm;
+	if (copy_from_user(&tmp, user_ud, sizeof tmp))
+		return -EFAULT;
+	switch (cmd) {
+	case PIO_UNIMAP:
+		if (!perm) return -EPERM;
+		return con_set_unimap(fg_console, tmp.entry_ct, (struct unipair *)A(tmp.entries));
+	case GIO_UNIMAP:
+		return con_get_unimap(fg_console, tmp.entry_ct, &(user_ud->entry_ct), (struct unipair *)A(tmp.entries));
+	}
+	return 0;
+}
+#endif /* CONFIG_VT */
+/*
+ * 32-bit wrapper for SMB_IOC_GETMOUNTUID: run the native ioctl against a
+ * kernel-resident uid under KERNEL_DS, then store the result through the
+ * 32-bit ABI's uid type at the user-supplied address.
+ */
+static int do_smb_getmountuid(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	__kernel_uid_t uid;
+	mm_segment_t saved_fs;
+	int ret;
+
+	/* Force the native command code regardless of what was passed in. */
+	cmd = SMB_IOC_GETMOUNTUID;
+
+	saved_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_ioctl(fd, cmd, (unsigned long)&uid);
+	set_fs(saved_fs);
+
+	if (ret < 0)
+		return ret;
+	return put_user(uid, (__kernel_uid_t32 *)arg);
+}
+
+/* 32-bit layouts and command numbers for the ATM interface ioctls.  The
+ * *32 numbers differ from the native ones only in the encoded struct
+ * size, since both payloads embed a pointer. */
+struct atmif_sioc32 {
+	int number;
+	int length;
+	__kernel_caddr_t32 arg;		/* 32-bit user pointer */
+};
+
+struct atm_iobuf32 {
+	int length;
+	__kernel_caddr_t32 buffer;	/* 32-bit user pointer */
+};
+
+#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct atmif_sioc32)
+#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct atm_iobuf32)
+#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct atmif_sioc32)
+#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct atmif_sioc32)
+#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct atmif_sioc32)
+#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct atmif_sioc32)
+#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct atmif_sioc32)
+#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct atmif_sioc32)
+#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct atmif_sioc32)
+#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct atmif_sioc32)
+#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct atmif_sioc32)
+#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct atmif_sioc32)
+#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct atmif_sioc32)
+#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct atmif_sioc32)
+#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct atmif_sioc32)
+#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct atmif_sioc32)
+#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct atmif_sioc32)
+
+/* Map from 32-bit ATM command number to the native command number. */
+static struct {
+	unsigned int cmd32;
+	unsigned int cmd;
+} atm_ioctl_map[] = {
+	{ ATM_GETLINKRATE32, ATM_GETLINKRATE },
+	{ ATM_GETNAMES32, ATM_GETNAMES },
+	{ ATM_GETTYPE32, ATM_GETTYPE },
+	{ ATM_GETESI32, ATM_GETESI },
+	{ ATM_GETADDR32, ATM_GETADDR },
+	{ ATM_RSTADDR32, ATM_RSTADDR },
+	{ ATM_ADDADDR32, ATM_ADDADDR },
+	{ ATM_DELADDR32, ATM_DELADDR },
+	{ ATM_GETCIRANGE32, ATM_GETCIRANGE },
+	{ ATM_SETCIRANGE32, ATM_SETCIRANGE },
+	{ ATM_SETESI32, ATM_SETESI },
+	{ ATM_SETESIF32, ATM_SETESIF },
+	{ ATM_GETSTAT32, ATM_GETSTAT },
+	{ ATM_GETSTATZ32, ATM_GETSTATZ },
+	{ ATM_GETLOOP32, ATM_GETLOOP },
+	{ ATM_SETLOOP32, ATM_SETLOOP },
+	{ ATM_QUERYLOOP32, ATM_QUERYLOOP }
+};
+
+#define NR_ATM_IOCTL (sizeof(atm_ioctl_map)/sizeof(atm_ioctl_map[0]))
+
+
+/*
+ * Translate an ATM ioctl whose payload is a struct atm_iobuf: widen the
+ * descriptor, bounce the user buffer through a kernel allocation, issue
+ * the native ioctl under KERNEL_DS, copy the buffer back, and report the
+ * (possibly updated) length through the 32-bit struct.
+ */
+static int do_atm_iobuf(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct atm_iobuf32 iobuf32;
+	struct atm_iobuf iobuf = { 0, NULL };
+	mm_segment_t old_fs;
+	int err;
+
+	err = copy_from_user(&iobuf32, (struct atm_iobuf32*)arg,
+			     sizeof(struct atm_iobuf32));
+	if (err)
+		return -EFAULT;
+
+	iobuf.length = iobuf32.length;
+
+	if (iobuf32.buffer == (__kernel_caddr_t32) NULL || iobuf32.length == 0) {
+		/* NULL or empty: pass the (widened) value straight through. */
+		iobuf.buffer = (void*)(unsigned long)iobuf32.buffer;
+	} else {
+		iobuf.buffer = kmalloc(iobuf.length, GFP_KERNEL);
+		if (iobuf.buffer == NULL) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		err = copy_from_user(iobuf.buffer, (void *)A(iobuf32.buffer), iobuf.length);
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+	}
+
+	old_fs = get_fs(); set_fs (KERNEL_DS);
+	err = sys_ioctl (fd, cmd, (unsigned long)&iobuf);
+	set_fs (old_fs);
+	if (err)
+		goto out;
+
+	if (iobuf.buffer && iobuf.length > 0) {
+		err = copy_to_user((void *)A(iobuf32.buffer), iobuf.buffer, iobuf.length);
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+	}
+	err = __put_user(iobuf.length, &(((struct atm_iobuf32*)arg)->length));
+
+ out:
+	/* Free only if the kmalloc branch above was taken (same condition). */
+	if (iobuf32.buffer && iobuf32.length > 0)
+		kfree(iobuf.buffer);
+
+	return err;
+}
+
+
+/*
+ * Convert a 32-bit struct atmif_sioc argument to the native layout,
+ * forward the ioctl under KERNEL_DS, and copy the results back.
+ * Mirrors do_atm_iobuf() but also carries the interface 'number'.
+ */
+static int do_atmif_sioc(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct atmif_sioc32 sioc32;
+ struct atmif_sioc sioc = { 0, 0, NULL };
+ mm_segment_t old_fs;
+ int err;
+
+ err = copy_from_user(&sioc32, (struct atmif_sioc32*)arg,
+ sizeof(struct atmif_sioc32));
+ if (err)
+ return -EFAULT;
+
+ sioc.number = sioc32.number;
+ sioc.length = sioc32.length;
+
+ if (sioc32.arg == (__kernel_caddr_t32) NULL || sioc32.length == 0) {
+ /* NULL/empty argument: forward the (zero-extended) pointer as-is. */
+ sioc.arg = (void*)(unsigned long)sioc32.arg;
+ } else {
+ sioc.arg = kmalloc(sioc.length, GFP_KERNEL);
+ if (sioc.arg == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = copy_from_user(sioc.arg, (void *)A(sioc32.arg), sioc32.length);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+ }
+
+ /* Run the native ioctl on the kernel-space temporary. */
+ old_fs = get_fs(); set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)&sioc);
+ set_fs (old_fs);
+ if (err) {
+ goto out;
+ }
+
+ /* Copy the result data and updated length back to userland. */
+ if (sioc.arg && sioc.length > 0) {
+ err = copy_to_user((void *)A(sioc32.arg), sioc.arg, sioc.length);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+ }
+ err = __put_user(sioc.length, &(((struct atmif_sioc32*)arg)->length));
+
+ out:
+ /* The bounce buffer only exists when the user passed a real buffer. */
+ if (sioc32.arg && sioc32.length > 0)
+ kfree(sioc.arg);
+
+ return err;
+}
+
+
+/*
+ * Dispatch a 32-bit ATM ioctl: translate the command number via
+ * atm_ioctl_map and hand the argument to the matching translation
+ * helper.  Unknown commands yield -EINVAL.
+ */
+static int do_atm_ioctl(unsigned int fd, unsigned int cmd32, unsigned long arg)
+{
+ unsigned int cmd = 0;
+ int idx;
+
+ /* The SONET ioctls use the same command number in both ABIs and
+ * carry an atmif_sioc, so they go straight to the sioc translator. */
+ switch (cmd32) {
+ case SONET_GETSTAT:
+ case SONET_GETSTATZ:
+ case SONET_GETDIAG:
+ case SONET_SETDIAG:
+ case SONET_CLRDIAG:
+ case SONET_SETFRAMING:
+ case SONET_GETFRAMING:
+ case SONET_GETFRSENSE:
+ return do_atmif_sioc(fd, cmd32, arg);
+ default:
+ break;
+ }
+
+ /* Translate the 32-bit command number to its native equivalent. */
+ for (idx = 0; idx < NR_ATM_IOCTL; idx++) {
+ if (atm_ioctl_map[idx].cmd32 == cmd32) {
+ cmd = atm_ioctl_map[idx].cmd;
+ break;
+ }
+ }
+ if (idx == NR_ATM_IOCTL)
+ return -EINVAL;
+
+ /* ATM_GETNAMES is the only mapped command taking an atm_iobuf;
+ * every other table entry takes an atmif_sioc. */
+ if (cmd == ATM_GETNAMES)
+ return do_atm_iobuf(fd, cmd, arg);
+ return do_atmif_sioc(fd, cmd, arg);
+}
+
+#if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+/* 32-bit mirrors of the LVM user-space structures.  Fields that are
+ * pointers in the native structs shrink to u32 here, which is what
+ * forces the member-by-member translation in do_lvm_ioctl() below. */
+
+/* 32-bit layout of vg_t (volume group descriptor). */
+typedef struct {
+ uint8_t vg_name[NAME_LEN];
+ uint32_t vg_number;
+ uint32_t vg_access;
+ uint32_t vg_status;
+ uint32_t lv_max;
+ uint32_t lv_cur;
+ uint32_t lv_open;
+ uint32_t pv_max;
+ uint32_t pv_cur;
+ uint32_t pv_act;
+ uint32_t dummy;
+ uint32_t vgda;
+ uint32_t pe_size;
+ uint32_t pe_total;
+ uint32_t pe_allocated;
+ uint32_t pvg_total;
+ u32 proc;
+ u32 pv[ABS_MAX_PV + 1];
+ u32 lv[ABS_MAX_LV + 1];
+ uint8_t vg_uuid[UUID_LEN+1]; /* volume group UUID */
+ uint8_t dummy1[200];
+} vg32_t;
+
+/* 32-bit layout of pv_t (physical volume descriptor). */
+typedef struct {
+ uint8_t id[2];
+ uint16_t version;
+ lvm_disk_data_t pv_on_disk;
+ lvm_disk_data_t vg_on_disk;
+ lvm_disk_data_t pv_namelist_on_disk;
+ lvm_disk_data_t lv_on_disk;
+ lvm_disk_data_t pe_on_disk;
+ uint8_t pv_name[NAME_LEN];
+ uint8_t vg_name[NAME_LEN];
+ uint8_t system_id[NAME_LEN];
+ kdev_t pv_dev;
+ uint32_t pv_number;
+ uint32_t pv_status;
+ uint32_t pv_allocatable;
+ uint32_t pv_size;
+ uint32_t lv_cur;
+ uint32_t pe_size;
+ uint32_t pe_total;
+ uint32_t pe_allocated;
+ uint32_t pe_stale;
+ u32 pe;
+ u32 inode;
+ uint8_t pv_uuid[UUID_LEN+1];
+} pv32_t;
+
+/* 32-bit layout of lv_req_t: LV name plus a 32-bit pointer to an lv32_t. */
+typedef struct {
+ char lv_name[NAME_LEN];
+ u32 lv;
+} lv_req32_t;
+
+typedef struct {
+ u32 lv_index;
+ u32 lv;
+ /* Transfer size because user space and kernel space differ */
+ uint16_t size;
+} lv_status_byindex_req32_t;
+
+typedef struct {
+ __kernel_dev_t32 dev;
+ u32 lv;
+} lv_status_bydev_req32_t;
+
+typedef struct {
+ uint8_t lv_name[NAME_LEN];
+ kdev_t old_dev;
+ kdev_t new_dev;
+ u32 old_pe;
+ u32 new_pe;
+} le_remap_req32_t;
+
+typedef struct {
+ char pv_name[NAME_LEN];
+ u32 pv;
+} pv_status_req32_t;
+
+/* 32-bit layout of lv_t (logical volume descriptor); the u32 members
+ * are user pointers which get_lv_t()/copy_lv_t() translate. */
+typedef struct {
+ uint8_t lv_name[NAME_LEN];
+ uint8_t vg_name[NAME_LEN];
+ uint32_t lv_access;
+ uint32_t lv_status;
+ uint32_t lv_open;
+ kdev_t lv_dev;
+ uint32_t lv_number;
+ uint32_t lv_mirror_copies;
+ uint32_t lv_recovery;
+ uint32_t lv_schedule;
+ uint32_t lv_size;
+ u32 lv_current_pe;
+ uint32_t lv_current_le;
+ uint32_t lv_allocated_le;
+ uint32_t lv_stripes;
+ uint32_t lv_stripesize;
+ uint32_t lv_badblock;
+ uint32_t lv_allocation;
+ uint32_t lv_io_timeout;
+ uint32_t lv_read_ahead;
+ /* delta to version 1 starts here */
+ u32 lv_snapshot_org;
+ u32 lv_snapshot_prev;
+ u32 lv_snapshot_next;
+ u32 lv_block_exception;
+ uint32_t lv_remap_ptr;
+ uint32_t lv_remap_end;
+ uint32_t lv_chunk_size;
+ uint32_t lv_snapshot_minor;
+ char dummy[200];
+} lv32_t;
+
+/* 32-bit layout of lv_block_exception_t (snapshot remap entry). */
+typedef struct {
+ u32 hash[2];
+ u32 rsector_org;
+ kdev_t rdev_org;
+ u32 rsector_new;
+ kdev_t rdev_new;
+} lv_block_exception32_t;
+
+/* Free an lv_t built by get_lv_t(), including its vmalloc'ed PE and
+ * block-exception arrays. */
+static void put_lv_t(lv_t *l)
+{
+ if (l->lv_current_pe) vfree(l->lv_current_pe);
+ if (l->lv_block_exception) vfree(l->lv_block_exception);
+ kfree(l);
+}
+
+/*
+ * Build a native lv_t from the 32-bit lv32_t at user address 'p'.
+ * The fixed-size fields are copied in three runs that skip the u32
+ * pointer members (the offset arithmetic below relies on the lv32_t
+ * field order); the PE and block-exception arrays those pointers
+ * reference are then pulled into vmalloc'ed kernel copies.
+ * On failure *errp is set (-EFAULT/-ENOMEM) and NULL is returned;
+ * on success the caller must release the result with put_lv_t().
+ */
+static lv_t *get_lv_t(u32 p, int *errp)
+{
+ int err, i;
+ u32 ptr1, ptr2;
+ size_t size;
+ lv_block_exception32_t *lbe32;
+ lv_block_exception_t *lbe;
+ lv32_t *ul = (lv32_t *)A(p);
+ lv_t *l = (lv_t *) kmalloc(sizeof(lv_t), GFP_KERNEL);
+
+ if (!l) {
+ *errp = -ENOMEM;
+ return NULL;
+ }
+ memset(l, 0, sizeof(lv_t));
+ /* Copy everything before lv_current_pe, then the two spans between
+ * the embedded 32-bit pointers, then fetch the pointers themselves. */
+ err = copy_from_user(l, ul, (long)&((lv32_t *)0)->lv_current_pe);
+ err |= __copy_from_user(&l->lv_current_le, &ul->lv_current_le,
+ ((long)&ul->lv_snapshot_org) - ((long)&ul->lv_current_le));
+ err |= __copy_from_user(&l->lv_remap_ptr, &ul->lv_remap_ptr,
+ ((long)&ul->dummy[0]) - ((long)&ul->lv_remap_ptr));
+ err |= __get_user(ptr1, &ul->lv_current_pe);
+ err |= __get_user(ptr2, &ul->lv_block_exception);
+ if (err) {
+ kfree(l);
+ *errp = -EFAULT;
+ return NULL;
+ }
+ /* Pull in the PE array (lv_allocated_le entries). */
+ if (ptr1) {
+ size = l->lv_allocated_le * sizeof(pe_t);
+ l->lv_current_pe = vmalloc(size);
+ if (l->lv_current_pe)
+ err = copy_from_user(l->lv_current_pe, (void *)A(ptr1), size);
+ }
+ /* Pull in the snapshot block-exception table, translating each
+ * 32-bit entry field by field. */
+ if (!err && ptr2) {
+ size = l->lv_remap_end * sizeof(lv_block_exception_t);
+ l->lv_block_exception = lbe = vmalloc(size);
+ if (l->lv_block_exception) {
+ lbe32 = (lv_block_exception32_t *)A(ptr2);
+ memset(lbe, 0, size);
+ for (i = 0; i < l->lv_remap_end; i++, lbe++, lbe32++) {
+ err |= get_user(lbe->rsector_org, &lbe32->rsector_org);
+ err |= __get_user(lbe->rdev_org, &lbe32->rdev_org);
+ err |= __get_user(lbe->rsector_new, &lbe32->rsector_new);
+ err |= __get_user(lbe->rdev_new, &lbe32->rdev_new);
+ }
+ }
+ }
+ if (err || (ptr1 && !l->lv_current_pe) || (ptr2 && !l->lv_block_exception)) {
+ if (!err)
+ *errp = -ENOMEM;
+ else
+ *errp = -EFAULT;
+ put_lv_t(l);
+ return NULL;
+ }
+ return l;
+}
+
+/*
+ * Copy a native lv_t back out to the 32-bit lv32_t at user address
+ * 'ptr', using the same three-span layout as get_lv_t(), plus the PE
+ * array through the user's own lv_current_pe pointer.  Returns 0 or
+ * -EFAULT.  Note the embedded u32 pointer fields themselves are left
+ * untouched in userspace.
+ */
+static int copy_lv_t(u32 ptr, lv_t *l)
+{
+ int err;
+ lv32_t *ul = (lv32_t *)A(ptr);
+ u32 ptr1;
+ size_t size;
+
+ err = get_user(ptr1, &ul->lv_current_pe);
+ if (err)
+ return -EFAULT;
+ err = copy_to_user(ul, l, (long)&((lv32_t *)0)->lv_current_pe);
+ err |= __copy_to_user(&ul->lv_current_le, &l->lv_current_le,
+ ((long)&ul->lv_snapshot_org) - ((long)&ul->lv_current_le));
+ err |= __copy_to_user(&ul->lv_remap_ptr, &l->lv_remap_ptr,
+ ((long)&ul->dummy[0]) - ((long)&ul->lv_remap_ptr));
+ size = l->lv_allocated_le * sizeof(pe_t);
+ if (ptr1)
+ err |= __copy_to_user((void *)A(ptr1), l->lv_current_pe, size);
+ return err ? -EFAULT : 0;
+}
+
+/*
+ * Translate 32-bit LVM ioctls.  The 32-bit structures embed user
+ * pointers as u32, so each command unpacks its argument into a native
+ * temporary (karg), runs the real ioctl under KERNEL_DS, and copies
+ * the results back out in the second switch.
+ *
+ * Fixes vs. the original: 'v' no longer leaks on the -EPERM path,
+ * LV_STATUS_BYDEV frees the correct union member, and VG_STATUS only
+ * writes the UUID back to userspace on success.
+ */
+static int do_lvm_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ vg_t *v = NULL;
+ /* The request variants overlay each other; all start with a NAME_LEN
+ * name field, which the LV_* and PV_* cases rely on below. */
+ union {
+ lv_req_t lv_req;
+ le_remap_req_t le_remap;
+ lv_status_byindex_req_t lv_byindex;
+ lv_status_bydev_req_t lv_bydev;
+ pv_status_req_t pv_status;
+ } u;
+ pv_t p;
+ int err;
+ u32 ptr = 0;
+ int i;
+ mm_segment_t old_fs;
+ void *karg = &u;
+
+ /* Phase 1: build the native argument for the command. */
+ switch (cmd) {
+ case VG_STATUS:
+ v = kmalloc(sizeof(vg_t), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+ karg = v;
+ break;
+
+ case VG_CREATE_OLD:
+ case VG_CREATE:
+ v = kmalloc(sizeof(vg_t), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+ if (copy_from_user(v, (void *)arg, (long)&((vg32_t *)0)->proc)) {
+ kfree(v);
+ return -EFAULT;
+ }
+ /* 'proc' field is unused, just NULL it out. */
+ v->proc = NULL;
+ if (copy_from_user(v->vg_uuid, ((vg32_t *)arg)->vg_uuid, UUID_LEN+1)) {
+ kfree(v);
+ return -EFAULT;
+ }
+
+ karg = v;
+ memset(v->pv, 0, sizeof(v->pv) + sizeof(v->lv));
+ /* Bug fix: 'v' used to leak on this error return. */
+ if (v->pv_max > ABS_MAX_PV || v->lv_max > ABS_MAX_LV) {
+ kfree(v);
+ return -EPERM;
+ }
+ /* Translate each referenced physical volume. */
+ for (i = 0; i < v->pv_max; i++) {
+ err = __get_user(ptr, &((vg32_t *)arg)->pv[i]);
+ if (err)
+ break;
+ if (ptr) {
+ v->pv[i] = kmalloc(sizeof(pv_t), GFP_KERNEL);
+ if (!v->pv[i]) {
+ err = -ENOMEM;
+ break;
+ }
+ /* pv32_t ends with pe/inode (2x u32 = 8 bytes) and
+ * the UUID; copy the fixed part, then the UUID. */
+ err = copy_from_user(v->pv[i], (void *)A(ptr),
+ sizeof(pv32_t) - 8 - UUID_LEN+1);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = copy_from_user(v->pv[i]->pv_uuid,
+ ((pv32_t *)A(ptr))->pv_uuid,
+ UUID_LEN+1);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ v->pv[i]->pe = NULL;
+ v->pv[i]->bd = NULL;
+ }
+ }
+ /* Translate each referenced logical volume. */
+ if (!err) {
+ for (i = 0; i < v->lv_max; i++) {
+ err = __get_user(ptr, &((vg32_t *)arg)->lv[i]);
+ if (err)
+ break;
+ if (ptr) {
+ v->lv[i] = get_lv_t(ptr, &err);
+ if (err)
+ break;
+ }
+ }
+ }
+ break;
+
+ case LV_CREATE:
+ case LV_EXTEND:
+ case LV_REDUCE:
+ case LV_REMOVE:
+ case LV_RENAME:
+ case LV_STATUS_BYNAME:
+ /* All of these start with the LV name; pv_status shares that
+ * leading NAME_LEN field with lv_req in the union. */
+ err = copy_from_user(&u.pv_status, (void *)arg, sizeof(u.pv_status.pv_name));
+ if (err)
+ return -EFAULT;
+ if (cmd != LV_REMOVE) {
+ err = __get_user(ptr, &((lv_req32_t *)arg)->lv);
+ if (err)
+ return err;
+ u.lv_req.lv = get_lv_t(ptr, &err);
+ } else
+ u.lv_req.lv = NULL;
+ break;
+
+ case LV_STATUS_BYINDEX:
+ err = get_user(u.lv_byindex.lv_index,
+ &((lv_status_byindex_req32_t *)arg)->lv_index);
+ err |= __get_user(ptr, &((lv_status_byindex_req32_t *)arg)->lv);
+ if (err)
+ return err;
+ u.lv_byindex.lv = get_lv_t(ptr, &err);
+ break;
+
+ case LV_STATUS_BYDEV:
+ err = get_user(u.lv_bydev.dev, &((lv_status_bydev_req32_t *)arg)->dev);
+ err |= __get_user(ptr, &((lv_status_bydev_req32_t *)arg)->lv);
+ if (err)
+ return err;
+ u.lv_bydev.lv = get_lv_t(ptr, &err);
+ break;
+
+ case VG_EXTEND:
+ err = copy_from_user(&p, (void *)arg, sizeof(pv32_t) - 8 - UUID_LEN+1);
+ if (err)
+ return -EFAULT;
+ err = copy_from_user(p.pv_uuid, ((pv32_t *)arg)->pv_uuid, UUID_LEN+1);
+ if (err)
+ return -EFAULT;
+ p.pe = NULL;
+ p.bd = NULL;
+ karg = &p;
+ break;
+
+ case PV_CHANGE:
+ case PV_STATUS:
+ err = copy_from_user(&u.pv_status, (void *)arg, sizeof(u.lv_req.lv_name));
+ if (err)
+ return -EFAULT;
+ err = __get_user(ptr, &((pv_status_req32_t *)arg)->pv);
+ if (err)
+ return err;
+ u.pv_status.pv = &p;
+ if (cmd == PV_CHANGE) {
+ err = copy_from_user(&p, (void *)A(ptr),
+ sizeof(pv32_t) - 8 - UUID_LEN+1);
+ if (err)
+ return -EFAULT;
+ p.pe = NULL;
+ p.bd = NULL;
+ }
+ break;
+ }
+
+ /* Run the native ioctl on the kernel-space argument. */
+ old_fs = get_fs(); set_fs (KERNEL_DS);
+ err = sys_ioctl (fd, cmd, (unsigned long)karg);
+ set_fs (old_fs);
+
+ /* Phase 2: copy results back out and release the temporaries. */
+ switch (cmd) {
+ case VG_STATUS:
+ if (!err) {
+ if (copy_to_user((void *)arg, v, (long)&((vg32_t *)0)->proc) ||
+ clear_user(&((vg32_t *)arg)->proc, sizeof(vg32_t) - (long)&((vg32_t *)0)->proc))
+ err = -EFAULT;
+ }
+ /* Bug fix: only write the UUID back on success; the original
+ * wrote to userspace even when the ioctl failed. */
+ if (!err && copy_to_user(((vg32_t *)arg)->vg_uuid, v->vg_uuid, UUID_LEN+1)) {
+ err = -EFAULT;
+ }
+ kfree(v);
+ break;
+
+ case VG_CREATE_OLD:
+ case VG_CREATE:
+ for (i = 0; i < v->pv_max; i++) {
+ if (v->pv[i])
+ kfree(v->pv[i]);
+ }
+ for (i = 0; i < v->lv_max; i++) {
+ if (v->lv[i])
+ put_lv_t(v->lv[i]);
+ }
+ kfree(v);
+ break;
+
+ case LV_STATUS_BYNAME:
+ if (!err && u.lv_req.lv)
+ err = copy_lv_t(ptr, u.lv_req.lv);
+ /* Fall through */
+
+ case LV_CREATE:
+ case LV_EXTEND:
+ case LV_REDUCE:
+ if (u.lv_req.lv)
+ put_lv_t(u.lv_req.lv);
+ break;
+
+ case LV_STATUS_BYINDEX:
+ if (u.lv_byindex.lv) {
+ if (!err)
+ err = copy_lv_t(ptr, u.lv_byindex.lv);
+ put_lv_t(u.lv_byindex.lv);
+ }
+ break;
+
+ case LV_STATUS_BYDEV:
+ if (u.lv_bydev.lv) {
+ if (!err)
+ err = copy_lv_t(ptr, u.lv_bydev.lv);
+ /* Bug fix: this used to free u.lv_byindex.lv, the wrong
+ * union member (its 'lv' sits at a different offset). */
+ put_lv_t(u.lv_bydev.lv);
+ }
+ break;
+
+ case PV_STATUS:
+ if (!err) {
+ err = copy_to_user((void *)A(ptr), &p, sizeof(pv32_t) - 8 - UUID_LEN+1);
+ if (err)
+ return -EFAULT;
+ err = copy_to_user(((pv_t *)A(ptr))->pv_uuid, p.pv_uuid, UUID_LEN + 1);
+ if (err)
+ return -EFAULT;
+ }
+ break;
+ }
+
+ return err;
+}
+#endif
+
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+/* This really belongs in include/linux/drm.h -DaveM */
+#include "../../../drivers/char/drm/drm.h"
+
+/* 32-bit layout of drm_version_t; the three buffer pointers are u32. */
+typedef struct drm32_version {
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+ int version_patchlevel;/* Patch level */
+ int name_len; /* Length of name buffer */
+ u32 name; /* Name of driver */
+ int date_len; /* Length of date buffer */
+ u32 date; /* User-space buffer to hold date */
+ int desc_len; /* Length of desc buffer */
+ u32 desc; /* User-space buffer to hold desc */
+} drm32_version_t;
+#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
+
+/*
+ * DRM_IOCTL_VERSION translation: bounce the three user string buffers
+ * through kernel copies sized by the user-supplied lengths, run the
+ * native ioctl, then copy strings and updated lengths back.
+ * NOTE(review): name_len/date_len/desc_len come straight from userland
+ * and are passed to kmalloc unchecked — presumably bounded only by
+ * kmalloc failing on huge values; confirm against DRM core limits.
+ */
+static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_version_t *uversion = (drm32_version_t *)arg;
+ char *name_ptr, *date_ptr, *desc_ptr;
+ u32 tmp1, tmp2, tmp3;
+ drm_version_t kversion;
+ mm_segment_t old_fs;
+ int ret;
+
+ memset(&kversion, 0, sizeof(kversion));
+ if (get_user(kversion.name_len, &uversion->name_len) ||
+ get_user(kversion.date_len, &uversion->date_len) ||
+ get_user(kversion.desc_len, &uversion->desc_len) ||
+ get_user(tmp1, &uversion->name) ||
+ get_user(tmp2, &uversion->date) ||
+ get_user(tmp3, &uversion->desc))
+ return -EFAULT;
+
+ name_ptr = (char *) A(tmp1);
+ date_ptr = (char *) A(tmp2);
+ desc_ptr = (char *) A(tmp3);
+
+ /* Allocate kernel bounce buffers for whichever strings were given. */
+ ret = -ENOMEM;
+ if (kversion.name_len && name_ptr) {
+ kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
+ if (!kversion.name)
+ goto out;
+ }
+ if (kversion.date_len && date_ptr) {
+ kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
+ if (!kversion.date)
+ goto out;
+ }
+ if (kversion.desc_len && desc_ptr) {
+ kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
+ if (!kversion.desc)
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
+ set_fs(old_fs);
+
+ /* Copy strings and scalar results back to the 32-bit struct. */
+ if (!ret) {
+ if ((kversion.name &&
+ copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
+ (kversion.date &&
+ copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
+ (kversion.desc &&
+ copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
+ ret = -EFAULT;
+ if (put_user(kversion.version_major, &uversion->version_major) ||
+ put_user(kversion.version_minor, &uversion->version_minor) ||
+ put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
+ put_user(kversion.name_len, &uversion->name_len) ||
+ put_user(kversion.date_len, &uversion->date_len) ||
+ put_user(kversion.desc_len, &uversion->desc_len))
+ ret = -EFAULT;
+ }
+
+out:
+ if (kversion.name)
+ kfree(kversion.name);
+ if (kversion.date)
+ kfree(kversion.date);
+ if (kversion.desc)
+ kfree(kversion.desc);
+ return ret;
+}
+
+/* 32-bit layout of drm_unique_t; 'unique' is a u32 user pointer. */
+typedef struct drm32_unique {
+ int unique_len; /* Length of unique */
+ u32 unique; /* Unique name for driver instantiation */
+} drm32_unique_t;
+#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
+#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
+
+/*
+ * Shared handler for DRM GET_UNIQUE/SET_UNIQUE: bounce the unique
+ * string through a kernel buffer (copied in for SET, copied out for
+ * GET) and forward to the native ioctl under KERNEL_DS.
+ */
+static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_unique_t *uarg = (drm32_unique_t *)arg;
+ drm_unique_t karg;
+ mm_segment_t old_fs;
+ char *uptr;
+ u32 tmp;
+ int ret;
+
+ if (get_user(karg.unique_len, &uarg->unique_len))
+ return -EFAULT;
+ karg.unique = NULL;
+
+ if (get_user(tmp, &uarg->unique))
+ return -EFAULT;
+
+ uptr = (char *) A(tmp);
+
+ if (uptr) {
+ karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
+ if (!karg.unique)
+ return -ENOMEM;
+ /* Only SET carries data in; GET just needs the buffer. */
+ if (cmd == DRM32_IOCTL_SET_UNIQUE &&
+ copy_from_user(karg.unique, uptr, karg.unique_len)) {
+ kfree(karg.unique);
+ return -EFAULT;
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ if (cmd == DRM32_IOCTL_GET_UNIQUE)
+ ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
+ else
+ ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (cmd == DRM32_IOCTL_GET_UNIQUE &&
+ uptr != NULL &&
+ copy_to_user(uptr, karg.unique, karg.unique_len))
+ ret = -EFAULT;
+ if (put_user(karg.unique_len, &uarg->unique_len))
+ ret = -EFAULT;
+ }
+
+ if (karg.unique != NULL)
+ kfree(karg.unique);
+
+ return ret;
+}
+
+/* 32-bit layout of drm_map_t; offset/size/handle shrink to u32. */
+typedef struct drm32_map {
+ u32 offset; /* Requested physical address (0 for SAREA)*/
+ u32 size; /* Requested physical size (bytes) */
+ drm_map_type_t type; /* Type of memory to map */
+ drm_map_flags_t flags; /* Flags */
+ u32 handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* MTRR slot used */
+ /* Private data */
+} drm32_map_t;
+#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
+
+/*
+ * DRM_IOCTL_ADD_MAP translation: widen the u32 fields of drm32_map_t
+ * into a native drm_map_t, forward the ioctl, and narrow the results
+ * (including the returned handle pointer) back to 32 bits.
+ */
+static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_map_t *uarg = (drm32_map_t *) arg;
+ drm_map_t karg;
+ mm_segment_t old_fs;
+ u32 tmp;
+ int ret;
+
+ ret = get_user(karg.offset, &uarg->offset);
+ ret |= get_user(karg.size, &uarg->size);
+ ret |= get_user(karg.type, &uarg->type);
+ ret |= get_user(karg.flags, &uarg->flags);
+ ret |= get_user(tmp, &uarg->handle);
+ ret |= get_user(karg.mtrr, &uarg->mtrr);
+ if (ret)
+ return -EFAULT;
+
+ karg.handle = (void *) A(tmp);
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ ret = put_user(karg.offset, &uarg->offset);
+ ret |= put_user(karg.size, &uarg->size);
+ ret |= put_user(karg.type, &uarg->type);
+ ret |= put_user(karg.flags, &uarg->flags);
+ /* Truncate the kernel handle back to the 32-bit ABI. */
+ tmp = (u32) (long)karg.handle;
+ ret |= put_user(tmp, &uarg->handle);
+ ret |= put_user(karg.mtrr, &uarg->mtrr);
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/* 32-bit layout of drm_buf_info_t; 'list' is a u32 user pointer. */
+typedef struct drm32_buf_info {
+ int count; /* Entries in list */
+ u32 list; /* (drm_buf_desc_t *) */
+} drm32_buf_info_t;
+#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
+
+/*
+ * DRM_IOCTL_INFO_BUFS translation: allocate a kernel array for the
+ * descriptor list, run the native ioctl, and copy the descriptors
+ * back if they fit in the user's original array.
+ *
+ * Fixes vs. the original: kmalloc failure now returns -ENOMEM (it
+ * returned -EFAULT), and a negative user-supplied count is rejected
+ * before it is used as an allocation size.
+ */
+static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
+ drm_buf_desc_t *ulist;
+ drm_buf_info_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
+
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
+ return -EFAULT;
+
+ /* Reject a negative count before using it as a size. */
+ if (karg.count < 0)
+ return -EINVAL;
+
+ ulist = (drm_buf_desc_t *) A(tmp);
+
+ orig_count = karg.count;
+
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ /* Only copy back if the result fits the user's array. */
+ if (karg.count <= orig_count &&
+ (copy_to_user(ulist, karg.list,
+ karg.count * sizeof(drm_buf_desc_t))))
+ ret = -EFAULT;
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ kfree(karg.list);
+
+ return ret;
+}
+
+/* 32-bit layout of drm_buf_free_t; 'list' is a u32 user pointer. */
+typedef struct drm32_buf_free {
+ int count;
+ u32 list; /* (int *) */
+} drm32_buf_free_t;
+#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
+
+/*
+ * DRM_IOCTL_FREE_BUFS translation: copy the user's int list into a
+ * kernel array and forward the ioctl; no data is copied back.
+ */
+static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
+ drm_buf_free_t karg;
+ mm_segment_t old_fs;
+ int *ulist;
+ int ret;
+ u32 tmp;
+
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
+ return -EFAULT;
+
+ ulist = (int *) A(tmp);
+
+ karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
+ goto out;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+out:
+ kfree(karg.list);
+
+ return ret;
+}
+
+/* 32-bit layout of drm_buf_pub_t; 'address' is a u32 user pointer. */
+typedef struct drm32_buf_pub {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int used; /* Amount of buffer in use (for DMA) */
+ u32 address; /* Address of buffer (void *) */
+} drm32_buf_pub_t;
+
+/* 32-bit layout of drm_buf_map_t. */
+typedef struct drm32_buf_map {
+ int count; /* Length of buflist */
+ u32 virtual; /* Mmaped area in user-virtual (void *) */
+ u32 list; /* Buffer information (drm_buf_pub_t *) */
+} drm32_buf_map_t;
+#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
+
+/*
+ * DRM_IOCTL_MAP_BUFS translation: widen the per-buffer drm32_buf_pub_t
+ * entries into a kernel drm_buf_pub_t array, run the native ioctl,
+ * then narrow each entry (including the buffer addresses) back out.
+ */
+static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
+ drm32_buf_pub_t *ulist;
+ drm_buf_map_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret, i;
+ u32 tmp1, tmp2;
+
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp1, &uarg->virtual) ||
+ get_user(tmp2, &uarg->list))
+ return -EFAULT;
+
+ karg.virtual = (void *) A(tmp1);
+ ulist = (drm32_buf_pub_t *) A(tmp2);
+
+ orig_count = karg.count;
+
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ /* Widen each 32-bit entry; tmp1 is reused for the address field. */
+ ret = -EFAULT;
+ for (i = 0; i < karg.count; i++) {
+ if (get_user(karg.list[i].idx, &ulist[i].idx) ||
+ get_user(karg.list[i].total, &ulist[i].total) ||
+ get_user(karg.list[i].used, &ulist[i].used) ||
+ get_user(tmp1, &ulist[i].address))
+ goto out;
+
+ karg.list[i].address = (void *) A(tmp1);
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ /* Narrow the entries back; the user's array has orig_count slots. */
+ if (!ret) {
+ for (i = 0; i < orig_count; i++) {
+ tmp1 = (u32) (long) karg.list[i].address;
+ if (put_user(karg.list[i].idx, &ulist[i].idx) ||
+ put_user(karg.list[i].total, &ulist[i].total) ||
+ put_user(karg.list[i].used, &ulist[i].used) ||
+ put_user(tmp1, &ulist[i].address)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+out:
+ kfree(karg.list);
+ return ret;
+}
+
+/* 32-bit layout of drm_dma_t; the four array pointers are u32. */
+typedef struct drm32_dma {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int context; /* Context handle */
+ int send_count; /* Number of buffers to send */
+ u32 send_indices; /* List of handles to buffers (int *) */
+ u32 send_sizes; /* Lengths of data to send (int *) */
+ drm_dma_flags_t flags; /* Flags */
+ int request_count; /* Number of buffers requested */
+ int request_size; /* Desired size for buffers */
+ u32 request_indices; /* Buffer information (int *) */
+ u32 request_sizes; /* (int *) */
+ int granted_count; /* Number of buffers granted */
+} drm32_dma_t;
+#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
+
+/* RED PEN The DRM layer blindly dereferences the send/request
+ * indice/size arrays even though they are userland
+ * pointers. -DaveM
+ */
+/*
+ * DRM_IOCTL_DMA translation: bounce the four int arrays (send/request
+ * indices and sizes) through kernel copies, run the native ioctl, and
+ * copy arrays and scalar results back out.
+ */
+static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_dma_t *uarg = (drm32_dma_t *) arg;
+ int *u_si, *u_ss, *u_ri, *u_rs;
+ drm_dma_t karg;
+ mm_segment_t old_fs;
+ int ret;
+ u32 tmp1, tmp2, tmp3, tmp4;
+
+ /* NULL the kernel copies so the cleanup path below is always safe. */
+ karg.send_indices = karg.send_sizes = NULL;
+ karg.request_indices = karg.request_sizes = NULL;
+
+ if (get_user(karg.context, &uarg->context) ||
+ get_user(karg.send_count, &uarg->send_count) ||
+ get_user(tmp1, &uarg->send_indices) ||
+ get_user(tmp2, &uarg->send_sizes) ||
+ get_user(karg.flags, &uarg->flags) ||
+ get_user(karg.request_count, &uarg->request_count) ||
+ get_user(karg.request_size, &uarg->request_size) ||
+ get_user(tmp3, &uarg->request_indices) ||
+ get_user(tmp4, &uarg->request_sizes) ||
+ get_user(karg.granted_count, &uarg->granted_count))
+ return -EFAULT;
+
+ u_si = (int *) A(tmp1);
+ u_ss = (int *) A(tmp2);
+ u_ri = (int *) A(tmp3);
+ u_rs = (int *) A(tmp4);
+
+ /* Pull in the send arrays (indices + sizes). */
+ if (karg.send_count) {
+ karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+ karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.send_indices || !karg.send_sizes)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.send_indices, u_si,
+ (karg.send_count * sizeof(int))) ||
+ copy_from_user(karg.send_sizes, u_ss,
+ (karg.send_count * sizeof(int))))
+ goto out;
+ }
+
+ /* Pull in the request arrays (indices + sizes). */
+ if (karg.request_count) {
+ karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+ karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.request_indices || !karg.request_sizes)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.request_indices, u_ri,
+ (karg.request_count * sizeof(int))) ||
+ copy_from_user(karg.request_sizes, u_rs,
+ (karg.request_count * sizeof(int))))
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ /* Copy scalar results and both array sets back to userland. */
+ if (!ret) {
+ if (put_user(karg.context, &uarg->context) ||
+ put_user(karg.send_count, &uarg->send_count) ||
+ put_user(karg.flags, &uarg->flags) ||
+ put_user(karg.request_count, &uarg->request_count) ||
+ put_user(karg.request_size, &uarg->request_size) ||
+ put_user(karg.granted_count, &uarg->granted_count))
+ ret = -EFAULT;
+
+ if (karg.send_count) {
+ if (copy_to_user(u_si, karg.send_indices,
+ (karg.send_count * sizeof(int))) ||
+ copy_to_user(u_ss, karg.send_sizes,
+ (karg.send_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ if (karg.request_count) {
+ if (copy_to_user(u_ri, karg.request_indices,
+ (karg.request_count * sizeof(int))) ||
+ copy_to_user(u_rs, karg.request_sizes,
+ (karg.request_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ }
+
+out:
+ if (karg.send_indices)
+ kfree(karg.send_indices);
+ if (karg.send_sizes)
+ kfree(karg.send_sizes);
+ if (karg.request_indices)
+ kfree(karg.request_indices);
+ if (karg.request_sizes)
+ kfree(karg.request_sizes);
+
+ return ret;
+}
+
+/* 32-bit layout of drm_ctx_res_t; 'contexts' is a u32 user pointer. */
+typedef struct drm32_ctx_res {
+ int count;
+ u32 contexts; /* (drm_ctx_t *) */
+} drm32_ctx_res_t;
+#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
+
+/*
+ * DRM_IOCTL_RES_CTX translation: bounce the drm_ctx_t array through a
+ * kernel copy, run the native ioctl, and copy the (up to orig_count)
+ * entries plus the updated count back out.
+ */
+static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
+ drm_ctx_t *ulist;
+ drm_ctx_res_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
+
+ karg.contexts = NULL;
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->contexts))
+ return -EFAULT;
+
+ ulist = (drm_ctx_t *) A(tmp);
+
+ orig_count = karg.count;
+ if (karg.count && ulist) {
+ karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
+ if (!karg.contexts)
+ return -ENOMEM;
+ if (copy_from_user(karg.contexts, ulist,
+ (karg.count * sizeof(drm_ctx_t)))) {
+ kfree(karg.contexts);
+ return -EFAULT;
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (orig_count) {
+ if (copy_to_user(ulist, karg.contexts,
+ (orig_count * sizeof(drm_ctx_t))))
+ ret = -EFAULT;
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ if (karg.contexts)
+ kfree(karg.contexts);
+
+ return ret;
+}
+
+#endif
+
+/* Stub handler for 32-bit ioctls that are recognized but unsupported. */
+static int ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+/* Handle the Intel-encoded BLKGETSIZE command number by forwarding to
+ * the native BLKGETSIZE via the generic long-writeback helper. */
+static int broken_blkgetsize(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ /* The mkswap binary hard codes it to Intel value :-((( */
+ return w_long(fd, BLKGETSIZE, arg);
+}
+
+/* 32-bit layout of struct blkpg_ioctl_arg; 'data' is a u32 user pointer. */
+struct blkpg_ioctl_arg32 {
+ int op;
+ int flags;
+ int datalen;
+ u32 data;
+};
+
+/*
+ * BLKPG translation: unpack the 32-bit argument, pull the partition
+ * record into a kernel copy, and forward under KERNEL_DS.  Only
+ * add/delete-partition operations are supported; anything else is
+ * -EINVAL.
+ *
+ * Fixes vs. the original: the switch was missing a 'break', so even a
+ * successful ioctl fell through to 'default' and returned -EINVAL;
+ * and '__get_user((long)a.data, ...)' read into a cast rvalue (a GCC
+ * cast-as-lvalue construct that never stores into a.data) — the
+ * pointer is now fetched through a u32 temporary.
+ */
+static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, struct blkpg_ioctl_arg32 *arg)
+{
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+ u32 udata;
+ int err;
+ mm_segment_t old_fs = get_fs();
+
+ err = get_user(a.op, &arg->op);
+ err |= __get_user(a.flags, &arg->flags);
+ err |= __get_user(a.datalen, &arg->datalen);
+ err |= __get_user(udata, &arg->data);
+ if (err) return err;
+ /* Widen the 32-bit user pointer into the native struct. */
+ a.data = (void *)(unsigned long)udata;
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+ case BLKPG_DEL_PARTITION:
+ if (a.datalen < sizeof(struct blkpg_partition))
+ return -EINVAL;
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+ a.data = &p;
+ set_fs (KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&a);
+ set_fs (old_fs);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return err;
+}
+
+/* AUTOFS_IOC_SETTIMEOUT: the argument is a long that is both read and
+ * written, so forward through the generic read/write-long helper. */
+static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
+}
+
+#if 0
+/* usbdevfs translation — currently disabled (see the long note below
+ * about URB reaping).  32-bit layout of usbdevfs_ctrltransfer; 'data'
+ * is a u32 user pointer. */
+struct usbdevfs_ctrltransfer32 {
+ __u8 requesttype;
+ __u8 request;
+ __u16 value;
+ __u16 index;
+ __u16 length;
+ __u32 timeout; /* in milliseconds */
+ __u32 data;
+};
+
+#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
+
+/*
+ * USBDEVFS_CONTROL translation (disabled, inside #if 0): bounce the
+ * data buffer through a kernel page — copied in for host-to-device
+ * transfers (bit 7 of requesttype clear), copied out for device-to-
+ * host — and forward the ioctl under KERNEL_DS.
+ * NOTE(review): __get_free_page() is not checked for NULL before use.
+ */
+static int do_usbdevfs_control(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct usbdevfs_ctrltransfer kctrl;
+ struct usbdevfs_ctrltransfer32 *uctrl;
+ mm_segment_t old_fs;
+ __u32 udata;
+ void *uptr, *kptr;
+ int err;
+
+ uctrl = (struct usbdevfs_ctrltransfer32 *) arg;
+
+ /* Copy the fixed fields (everything before the data pointer). */
+ if (copy_from_user(&kctrl, uctrl,
+ (sizeof(struct usbdevfs_ctrltransfer) -
+ sizeof(void *))))
+ return -EFAULT;
+
+ if (get_user(udata, &uctrl->data))
+ return -EFAULT;
+ uptr = (void *) A(udata);
+
+ /* In usbdevice_fs, it limits the control buffer to a page,
+ * for simplicity so do we.
+ */
+ if (!uptr || kctrl.length > PAGE_SIZE)
+ return -EINVAL;
+
+ kptr = (void *)__get_free_page(GFP_KERNEL);
+
+ /* Host-to-device: pull the payload in before the transfer. */
+ if ((kctrl.requesttype & 0x80) == 0) {
+ err = -EFAULT;
+ if (copy_from_user(kptr, uptr, kctrl.length))
+ goto out;
+ }
+
+ kctrl.data = kptr;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, USBDEVFS_CONTROL, (unsigned long)&kctrl);
+ set_fs(old_fs);
+
+ /* Device-to-host: push the received payload back out. */
+ if (err >= 0 &&
+ ((kctrl.requesttype & 0x80) != 0)) {
+ if (copy_to_user(uptr, kptr, kctrl.length))
+ err = -EFAULT;
+ }
+
+out:
+ free_page((unsigned long) kptr);
+ return err;
+}
+
+/* 32-bit layout of usbdevfs_bulktransfer; 'data' is a u32 user pointer. */
+struct usbdevfs_bulktransfer32 {
+ unsigned int ep;
+ unsigned int len;
+ unsigned int timeout; /* in milliseconds */
+ __u32 data;
+};
+
+#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
+
+/*
+ * USBDEVFS_BULK translation (disabled, inside #if 0): same page-bounce
+ * scheme as do_usbdevfs_control(), keyed on the IN bit (0x80) of the
+ * endpoint address instead of requesttype.
+ * NOTE(review): __get_free_page() is not checked for NULL before use.
+ */
+static int do_usbdevfs_bulk(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct usbdevfs_bulktransfer kbulk;
+ struct usbdevfs_bulktransfer32 *ubulk;
+ mm_segment_t old_fs;
+ __u32 udata;
+ void *uptr, *kptr;
+ int err;
+
+ ubulk = (struct usbdevfs_bulktransfer32 *) arg;
+
+ if (get_user(kbulk.ep, &ubulk->ep) ||
+ get_user(kbulk.len, &ubulk->len) ||
+ get_user(kbulk.timeout, &ubulk->timeout) ||
+ get_user(udata, &ubulk->data))
+ return -EFAULT;
+
+ uptr = (void *) A(udata);
+
+ /* In usbdevice_fs, it limits the control buffer to a page,
+ * for simplicity so do we.
+ */
+ if (!uptr || kbulk.len > PAGE_SIZE)
+ return -EINVAL;
+
+ kptr = (void *) __get_free_page(GFP_KERNEL);
+
+ /* OUT endpoint: pull the payload in before the transfer. */
+ if ((kbulk.ep & 0x80) == 0) {
+ err = -EFAULT;
+ if (copy_from_user(kptr, uptr, kbulk.len))
+ goto out;
+ }
+
+ kbulk.data = kptr;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, USBDEVFS_BULK, (unsigned long) &kbulk);
+ set_fs(old_fs);
+
+ /* IN endpoint: push the received payload back out. */
+ if (err >= 0 &&
+ ((kbulk.ep & 0x80) != 0)) {
+ if (copy_to_user(uptr, kptr, kbulk.len))
+ err = -EFAULT;
+ }
+
+out:
+ free_page((unsigned long) kptr);
+ return err;
+}
+
+/* This needs more work before we can enable it. Unfortunately
+ * because of the fancy asynchronous way URB status/error is written
+ * back to userspace, we'll need to fiddle with USB devio internals
+ * and/or reimplement entirely the frontend of it ourselves. -DaveM
+ *
+ * The issue is:
+ *
+ * When an URB is submitted via usbdevicefs it is put onto an
+ * asynchronous queue. When the URB completes, it may be reaped
+ * via another ioctl. During this reaping the status is written
+ * back to userspace along with the length of the transfer.
+ *
+ * We must translate into 64-bit kernel types so we pass in a kernel
+ * space copy of the usbdevfs_urb structure. This would mean that we
+ * must do something to deal with the async entry reaping. First we
+ * have to deal somehow with this transitory memory we've allocated.
+ * This is problematic since there are many call sites from which the
+ * async entries can be destroyed (and thus when we'd need to free up
+ * this kernel memory). One of which is the close() op of usbdevicefs.
+ * To handle that we'd need to make our own file_operations struct which
+ * overrides usbdevicefs's release op with our own which runs usbdevicefs's
+ * real release op then frees up the kernel memory.
+ *
+ * But how to keep track of these kernel buffers? We'd need to either
+ * keep track of them in some table _or_ know about usbdevicefs internals
+ * (ie. the exact layout of its file private, which is actually defined
+ * in linux/usbdevice_fs.h, the layout of the async queues are private to
+ * devio.c)
+ *
+ * There is one possible other solution I considered, also involving knowledge
+ * of usbdevicefs internals:
+ *
+ * After an URB is submitted, we "fix up" the address back to the user
+ * space one. This would work if the status/length fields written back
+ * by the async URB completion lines up perfectly in the 32-bit type with
+ * the 64-bit kernel type. Unfortunately, it does not because the iso
+ * frame descriptors, at the end of the struct, can be written back.
+ *
+ * I think we'll just need to simply duplicate the devio URB engine here.
+ */
+#if 0
+/* 32-bit userspace view of struct usbdevfs_urb.  Pointer-sized
+ * members are flattened to __u32 because the 32-bit ABI carries
+ * 32-bit pointers.  Field order and widths define the user ABI and
+ * must not be changed.
+ */
+struct usbdevfs_urb32 {
+ __u8 type;
+ __u8 endpoint;
+ __s32 status;
+ __u32 flags;
+ __u32 buffer; /* 32-bit user address of the data buffer */
+ __s32 buffer_length;
+ __s32 actual_length;
+ __s32 start_frame;
+ __s32 number_of_packets;
+ __s32 error_count;
+ __u32 signr;
+ __u32 usercontext; /* unused */
+ struct usbdevfs_iso_packet_desc iso_frame_desc[0]; /* variable-length iso descriptor tail */
+};
+
+#define USBDEVFS_SUBMITURB32 _IOR('U', 10, struct usbdevfs_urb32)
+
+/* Copy the fixed scalar fields of a 32-bit URB from userspace into
+ * the native kernel struct.  The buffer pointer and the iso frame
+ * descriptors are converted separately by the caller.
+ * Returns 0 on success, -EFAULT on a faulting user access.
+ */
+static int get_urb32(struct usbdevfs_urb *kurb,
+       struct usbdevfs_urb32 *uurb)
+{
+ /* The first get_user validates the user pointer; the remaining
+  * fetches may use the unchecked __get_user variant.
+  */
+ if (get_user(kurb->type, &uurb->type) ||
+ __get_user(kurb->endpoint, &uurb->endpoint) ||
+ __get_user(kurb->status, &uurb->status) ||
+ __get_user(kurb->flags, &uurb->flags) ||
+ __get_user(kurb->buffer_length, &uurb->buffer_length) ||
+ __get_user(kurb->actual_length, &uurb->actual_length) ||
+ __get_user(kurb->start_frame, &uurb->start_frame) ||
+ __get_user(kurb->number_of_packets, &uurb->number_of_packets) ||
+ __get_user(kurb->error_count, &uurb->error_count) ||
+ __get_user(kurb->signr, &uurb->signr))
+ return -EFAULT;
+
+ kurb->usercontext = 0; /* unused currently */
+
+ return 0;
+}
+
+/* Just put back the values which usbdevfs actually changes. */
+/* Just put back the values which usbdevfs actually changes. */
+/* Writes status, actual_length, error_count and (for iso URBs) the
+ * per-frame actual_length/status back to the 32-bit user struct.
+ * Returns 0 on success, -EFAULT on a faulting user access.
+ */
+static int put_urb32(struct usbdevfs_urb *kurb,
+       struct usbdevfs_urb32 *uurb)
+{
+ /* put_user validates the base pointer; later stores use the
+  * unchecked __put_user variant.
+  */
+ if (put_user(kurb->status, &uurb->status) ||
+ __put_user(kurb->actual_length, &uurb->actual_length) ||
+ __put_user(kurb->error_count, &uurb->error_count))
+ return -EFAULT;
+
+ /* Iso URBs additionally report per-frame completion results. */
+ if (kurb->number_of_packets != 0) {
+ int i;
+
+ for (i = 0; i < kurb->number_of_packets; i++) {
+ if (__put_user(kurb->iso_frame_desc[i].actual_length,
+        &uurb->iso_frame_desc[i].actual_length) ||
+     __put_user(kurb->iso_frame_desc[i].status,
+        &uurb->iso_frame_desc[i].status))
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/* Copy the iso frame descriptors of a 32-bit URB in from userspace
+ * and validate them: at most 128 packets, at most 1023 bytes per
+ * frame, at most 32768 bytes total.  For non-iso URBs this just
+ * clears number_of_packets.  On success buffer_length is recomputed
+ * as the sum of the frame lengths.
+ * Returns 0 on success, -EINVAL on a limit violation, -EFAULT on a
+ * faulting user access.
+ */
+static int get_urb32_isoframes(struct usbdevfs_urb *kurb,
+        struct usbdevfs_urb32 *uurb)
+{
+ unsigned int totlen;
+ int i;
+
+ /* Only iso transfers carry frame descriptors. */
+ if (kurb->type != USBDEVFS_URB_TYPE_ISO) {
+ kurb->number_of_packets = 0;
+ return 0;
+ }
+
+ /* Caller's kurb allocation reserves room for 128 descriptors, so
+  * the packet count must be bounded before the bulk copy below.
+  */
+ if (kurb->number_of_packets < 1 ||
+     kurb->number_of_packets > 128)
+ return -EINVAL;
+
+ /* The descriptor layout is identical in both ABIs, so a straight
+  * bulk copy suffices.
+  */
+ if (copy_from_user(&kurb->iso_frame_desc[0],
+      &uurb->iso_frame_desc[0],
+      sizeof(struct usbdevfs_iso_packet_desc) *
+      kurb->number_of_packets))
+ return -EFAULT;
+
+ totlen = 0;
+ for (i = 0; i < kurb->number_of_packets; i++) {
+ unsigned int this_len;
+
+ this_len = kurb->iso_frame_desc[i].length;
+ if (this_len > 1023)
+ return -EINVAL;
+
+ totlen += this_len;
+ }
+
+ if (totlen > 32768)
+ return -EINVAL;
+
+ /* The overall buffer size is derived from the frames, not taken
+  * from the user-supplied buffer_length.
+  */
+ kurb->buffer_length = totlen;
+
+ return 0;
+}
+
+/* 32-bit compat handler for USBDEVFS_SUBMITURB (currently compiled
+ * out -- see the long comment above: the asynchronous reap path
+ * cannot yet be translated safely).  Builds a kernel-space native
+ * URB from the 32-bit user struct, bounces the data buffer through
+ * kernel memory, submits under KERNEL_DS, and copies results back.
+ */
+static int do_usbdevfs_urb(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct usbdevfs_urb *kurb;
+ struct usbdevfs_urb32 *uurb;
+ mm_segment_t old_fs;
+ __u32 udata;
+ void *uptr, *kptr;
+ unsigned int buflen;
+ int err;
+
+ uurb = (struct usbdevfs_urb32 *) arg;
+
+ /* Reserve room for the worst case of 128 iso descriptors, the
+  * limit enforced by get_urb32_isoframes().
+  */
+ err = -ENOMEM;
+ kurb = kmalloc(sizeof(struct usbdevfs_urb) +
+        (sizeof(struct usbdevfs_iso_packet_desc) * 128),
+        GFP_KERNEL);
+ if (!kurb)
+ goto out;
+
+ err = -EFAULT;
+ if (get_urb32(kurb, uurb))
+ goto out;
+
+ err = get_urb32_isoframes(kurb, uurb);
+ if (err)
+ goto out;
+
+ /* Widen the 32-bit user buffer address into a native pointer. */
+ err = -EFAULT;
+ if (__get_user(udata, &uurb->buffer))
+ goto out;
+ uptr = (void *) A(udata);
+
+ /* NOTE(review): for non-iso URBs buffer_length comes straight
+  * from userspace unbounded -- this should be range-checked
+  * before this code is ever enabled.
+  */
+ err = -ENOMEM;
+ buflen = kurb->buffer_length;
+ kptr = kmalloc(buflen, GFP_KERNEL);
+ if (!kptr)
+ goto out;
+
+ kurb->buffer = kptr;
+
+ /* Prime the kernel bounce buffer; for IN transfers the driver
+  * overwrites it.
+  */
+ err = -EFAULT;
+ if (copy_from_user(kptr, uptr, buflen))
+ goto out_kptr;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, USBDEVFS_SUBMITURB, (unsigned long) kurb);
+ set_fs(old_fs);
+
+ if (err >= 0) {
+ /* XXX Shit, this doesn't work for async URBs :-( XXX */
+ if (put_urb32(kurb, uurb)) {
+ err = -EFAULT;
+ } else if ((kurb->endpoint & USB_DIR_IN) != 0) {
+ if (copy_to_user(uptr, kptr, buflen))
+ err = -EFAULT;
+ }
+ }
+
+out_kptr:
+ kfree(kptr);
+
+out:
+ kfree(kurb);
+ return err;
+}
+#endif
+
+#define USBDEVFS_REAPURB32 _IOW('U', 12, u32)
+#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, u32)
+
+/* 32-bit compat handler for USBDEVFS_REAPURB and
+ * USBDEVFS_REAPURBNDELAY: reaps the completed-URB pointer into a
+ * kernel-resident slot under KERNEL_DS, then stores it back to the
+ * user's 32-bit word, truncated to 32 bits.
+ */
+static int do_usbdevfs_reapurb(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs;
+ unsigned int kcmd;
+ void *urb_ptr;
+ int err;
+
+ /* Map the 32-bit ioctl number onto its native counterpart. */
+ if (cmd == USBDEVFS_REAPURB32)
+ kcmd = USBDEVFS_REAPURB;
+ else
+ kcmd = USBDEVFS_REAPURBNDELAY;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, kcmd, (unsigned long) &urb_ptr);
+ set_fs(old_fs);
+
+ /* On success, narrow the reaped pointer into the user's u32. */
+ if (err >= 0) {
+ if (put_user(((u32)(long)urb_ptr), (u32 *) A(arg)))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+/* 32-bit userspace view of struct usbdevfs_disconnectsignal: the
+ * context pointer is flattened to a u32.  Layout defines the user
+ * ABI and must not be changed.
+ */
+struct usbdevfs_disconnectsignal32 {
+ unsigned int signr;
+ u32 context; /* 32-bit user cookie, widened by the handler */
+};
+
+#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
+
+/* 32-bit compat handler for USBDEVFS_DISCSIGNAL: widens the 32-bit
+ * context cookie into the native struct and forwards the ioctl
+ * under KERNEL_DS.
+ */
+static int do_usbdevfs_discsignal(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct usbdevfs_disconnectsignal kdis;
+ struct usbdevfs_disconnectsignal32 *udis;
+ mm_segment_t old_fs;
+ u32 context32;
+ int err;
+
+ udis = (struct usbdevfs_disconnectsignal32 *) arg;
+
+ /* get_user on signr validates the base pointer, so the second
+  * fetch can use the unchecked variant.
+  */
+ if (get_user(kdis.signr, &udis->signr))
+ return -EFAULT;
+ if (__get_user(context32, &udis->context))
+ return -EFAULT;
+
+ /* Zero-extend the 32-bit cookie into the native pointer field. */
+ kdis.context = (void *) (long)context32;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, USBDEVFS_DISCSIGNAL, (unsigned long) &kdis);
+ set_fs(old_fs);
+
+ return err;
+}
+#endif
+
+/* 32-bit userspace view of struct mtd_oob_buf: the data pointer is
+ * flattened to a u32.  Layout defines the user ABI and must not be
+ * changed.
+ */
+struct mtd_oob_buf32 {
+ u32 start;
+ u32 length;
+ u32 ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M',3,struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M',4,struct mtd_oob_buf32)
+
+/* 32-bit compat handler for MEMREADOOB/MEMWRITEOOB: converts the
+ * 32-bit mtd_oob_buf32 into the native layout, bounces the OOB data
+ * through a kernel buffer, and issues the native ioctl under
+ * KERNEL_DS.  Returns 0 on success, otherwise a negative errno
+ * (any ioctl failure is reported as -EFAULT).
+ */
+static inline int
+mtd_rw_oob(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ mm_segment_t old_fs = get_fs();
+ struct mtd_oob_buf32 *uarg = (struct mtd_oob_buf32 *)arg;
+ struct mtd_oob_buf karg;
+ u32 uptr32;
+ char *ubuf;
+ int err;
+
+ if (get_user(karg.start, &uarg->start) ||
+     get_user(karg.length, &uarg->length) ||
+     get_user(uptr32, &uarg->ptr))
+ return -EFAULT;
+
+ /* Widen the 32-bit user buffer address into a native pointer. */
+ ubuf = (char *)A(uptr32);
+ if (karg.length <= 0)
+ return -EINVAL;
+
+ karg.ptr = kmalloc(karg.length, GFP_KERNEL);
+ if (karg.ptr == NULL)
+ return -ENOMEM;
+
+ /* Both directions prime the kernel buffer from userspace; for a
+  * read the contents are simply overwritten by the driver.
+  */
+ if (copy_from_user(karg.ptr, ubuf, karg.length)) {
+ kfree(karg.ptr);
+ return -EFAULT;
+ }
+
+ set_fs(KERNEL_DS);
+ switch (cmd) {
+ case MEMREADOOB32:
+ err = sys_ioctl(fd, MEMREADOOB, (unsigned long)&karg);
+ break;
+ case MEMWRITEOOB32:
+ err = sys_ioctl(fd, MEMWRITEOOB, (unsigned long)&karg);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ set_fs(old_fs);
+
+ /* A successful read hands the data and updated bounds back. */
+ if (err == 0 && cmd == MEMREADOOB32) {
+ err = copy_to_user(ubuf, karg.ptr, karg.length);
+ err |= put_user(karg.start, &uarg->start);
+ err |= put_user(karg.length, &uarg->length);
+ }
+
+ kfree(karg.ptr);
+ return ((err == 0) ? 0 : -EFAULT);
+}
+
+/* 32-bit userspace view of struct sg_io_hdr (the SG_IO control
+ * block).  The four pointer members are flattened to u32; all other
+ * fields mirror the native struct.  Layout defines the user ABI and
+ * must not be changed.
+ */
+struct sg_io_hdr_32
+{
+ int interface_id;
+ int dxfer_direction;
+ unsigned char cmd_len;
+ unsigned char mx_sb_len;
+ unsigned short iovec_count;
+ unsigned int dxfer_len;
+ u32 dxferp; /* 32-bit user pointer: data transfer buffer */
+ u32 cmdp; /* 32-bit user pointer: SCSI command */
+ u32 sbp; /* 32-bit user pointer: sense buffer */
+ unsigned int timeout;
+ unsigned int flags;
+ int pack_id;
+ u32 usr_ptr; /* 32-bit user pointer: caller's cookie */
+ unsigned char status;
+ unsigned char masked_status;
+ unsigned char msg_status;
+ unsigned char sb_len_wr;
+ unsigned short host_status;
+ unsigned short driver_status;
+ int resid;
+ unsigned int duration;
+ unsigned int info;
+};
+
+/* 32-bit compat handler for SG_IO: translates a 32-bit sg_io_hdr_32
+ * into a kernel-allocated native sg_io_hdr, widening the four
+ * embedded pointers, runs the ioctl under KERNEL_DS, then copies
+ * the updated header back, narrowing the pointers again.
+ *
+ * The header is transferred in chunks so the runs of scalar fields
+ * can be bulk-copied while each pointer field is converted
+ * individually.
+ * NOTE(review): the copy-back runs even when sys_ioctl() fails --
+ * presumably intentional since SG_IO reports status through the
+ * header; confirm against the sg driver.
+ */
+static int do_sg_io(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct sg_io_hdr *sg = kmalloc(sizeof(struct sg_io_hdr), GFP_KERNEL);
+ struct sg_io_hdr_32 *sg_32 = (struct sg_io_hdr_32 *)arg;
+ u32 dxferp_32;
+ u32 cmdp_32;
+ u32 sbp_32;
+ u32 usr_ptr_32;
+ int ret = -EFAULT;
+ int err;
+ mm_segment_t old_fs = get_fs();
+
+ if (!sg)
+ return -ENOMEM;
+
+ memset(sg, 0, sizeof(*sg));
+
+ /* Chunk 1: scalars up to (but excluding) dxferp, then the three
+  * pointer fields individually.
+  */
+ err = copy_from_user(sg, sg_32, offsetof(struct sg_io_hdr, dxferp));
+ err |= __get_user(dxferp_32, &sg_32->dxferp);
+ err |= __get_user(cmdp_32, &sg_32->cmdp);
+ err |= __get_user(sbp_32, &sg_32->sbp);
+
+ if (err)
+ goto error;
+
+ /* Widen the 32-bit user addresses into native pointers. */
+ sg->dxferp = (void *)A(dxferp_32);
+ sg->cmdp = (void *)A(cmdp_32);
+ sg->sbp = (void *)A(sbp_32);
+
+ /* Chunk 2: scalars between sbp and usr_ptr (timeout, flags,
+  * pack_id), then the usr_ptr cookie.
+  */
+ err = __copy_from_user(&sg->timeout, &sg_32->timeout,
+        (long)&sg->usr_ptr - (long)&sg->timeout);
+
+ err |= __get_user(usr_ptr_32, &sg_32->usr_ptr);
+
+ if (err)
+ goto error;
+
+ sg->usr_ptr = (void *)A(usr_ptr_32);
+
+ /* Chunk 3: the trailing status/result scalars. */
+ err = __copy_from_user(&sg->status, &sg_32->status,
+        sizeof(struct sg_io_hdr) -
+        offsetof(struct sg_io_hdr, status));
+
+ if (err)
+ goto error;
+
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, cmd, (unsigned long)sg);
+ set_fs(old_fs);
+
+ /* Copy the header back out in the same three chunks, narrowing
+  * each pointer to 32 bits.
+  */
+ err = copy_to_user(sg_32, sg, offsetof(struct sg_io_hdr, dxferp));
+
+ dxferp_32 = (unsigned long)sg->dxferp;
+ cmdp_32 = (unsigned long)sg->cmdp;
+ sbp_32 = (unsigned long)sg->sbp;
+ err |= __put_user(dxferp_32, &sg_32->dxferp);
+ err |= __put_user(cmdp_32, &sg_32->cmdp);
+ err |= __put_user(sbp_32, &sg_32->sbp);
+
+ if (err) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ err = __copy_to_user(&sg_32->timeout, &sg->timeout,
+        (long)&sg->usr_ptr - (long)&sg->timeout);
+
+ usr_ptr_32 = (unsigned long)sg->usr_ptr;
+ err |= __put_user(usr_ptr_32, &sg_32->usr_ptr);
+
+ if (err) {
+ ret = -EFAULT;
+ goto error;
+ }
+
+ err = __copy_to_user(&sg_32->status, &sg->status,
+        sizeof(struct sg_io_hdr) -
+        offsetof(struct sg_io_hdr, status));
+
+ if (err)
+ ret = -EFAULT;
+
+error:
+ kfree(sg);
+ return ret;
+}
+
+/* One entry in the ioctl translation table below: the ioctl number
+ * and the handler that services it, both stored as unsigned long so
+ * the table can hold either sys_ioctl or a compat translator.
+ */
+struct ioctl_trans {
+ unsigned long cmd;
+ unsigned long handler;
+ unsigned long next; /* chaining slot -- presumably filled in at registration; confirm against the lookup code */
+};
+
+/* Table-entry helpers: COMPATIBLE_IOCTL passes the command straight
+ * through to sys_ioctl; HANDLE_IOCTL routes it to a compat
+ * translation handler.
+ */
+#define COMPATIBLE_IOCTL(cmd) { cmd, (unsigned long)sys_ioctl, 0 }
+
+#define HANDLE_IOCTL(cmd,handler) { cmd, (unsigned long)handler, 0 }
+
+/* 32-bit renditions of ioctl numbers whose size field differs from
+ * the native definition.
+ */
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
+#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, __kernel_uid_t32)
+
+static struct ioctl_trans ioctl_translations[] = {
+ /* List here explicitly which ioctl's need translation,
+ * all others default to calling sys_ioctl().
+ */
+/* Big T */
+COMPATIBLE_IOCTL(TCGETA),
+COMPATIBLE_IOCTL(TCSETA),
+COMPATIBLE_IOCTL(TCSETAW),
+COMPATIBLE_IOCTL(TCSETAF),
+COMPATIBLE_IOCTL(TCSBRK),
+COMPATIBLE_IOCTL(TCXONC),
+COMPATIBLE_IOCTL(TCFLSH),
+COMPATIBLE_IOCTL(TCGETS),
+COMPATIBLE_IOCTL(TCSETS),
+COMPATIBLE_IOCTL(TCSETSW),
+COMPATIBLE_IOCTL(TCSETSF),
+COMPATIBLE_IOCTL(TIOCLINUX),
+COMPATIBLE_IOCTL(TIOCSTART),
+/* Little t */
+COMPATIBLE_IOCTL(TIOCGETD),
+COMPATIBLE_IOCTL(TIOCSETD),
+COMPATIBLE_IOCTL(TIOCEXCL),
+COMPATIBLE_IOCTL(TIOCNXCL),
+COMPATIBLE_IOCTL(TIOCCONS),
+COMPATIBLE_IOCTL(TIOCGSOFTCAR),
+COMPATIBLE_IOCTL(TIOCSSOFTCAR),
+COMPATIBLE_IOCTL(TIOCSWINSZ),
+COMPATIBLE_IOCTL(TIOCGWINSZ),
+COMPATIBLE_IOCTL(TIOCMGET),
+COMPATIBLE_IOCTL(TIOCMBIC),
+COMPATIBLE_IOCTL(TIOCMBIS),
+COMPATIBLE_IOCTL(TIOCMSET),
+COMPATIBLE_IOCTL(TIOCPKT),
+COMPATIBLE_IOCTL(TIOCNOTTY),
+COMPATIBLE_IOCTL(TIOCSTI),
+COMPATIBLE_IOCTL(TIOCOUTQ),
+COMPATIBLE_IOCTL(TIOCSPGRP),
+COMPATIBLE_IOCTL(TIOCGPGRP),
+COMPATIBLE_IOCTL(TIOCSCTTY),
+COMPATIBLE_IOCTL(TIOCGPTN),
+COMPATIBLE_IOCTL(TIOCSPTLCK),
+COMPATIBLE_IOCTL(TIOCGSERIAL),
+COMPATIBLE_IOCTL(TIOCSSERIAL),
+COMPATIBLE_IOCTL(TIOCSERGETLSR),
+COMPATIBLE_IOCTL(TIOCSLTC),
+/* Big F */
+COMPATIBLE_IOCTL(FBIOGET_VSCREENINFO),
+COMPATIBLE_IOCTL(FBIOPUT_VSCREENINFO),
+COMPATIBLE_IOCTL(FBIOPAN_DISPLAY),
+COMPATIBLE_IOCTL(FBIOGET_FCURSORINFO),
+COMPATIBLE_IOCTL(FBIOGET_VCURSORINFO),
+COMPATIBLE_IOCTL(FBIOPUT_VCURSORINFO),
+COMPATIBLE_IOCTL(FBIOGET_CURSORSTATE),
+COMPATIBLE_IOCTL(FBIOPUT_CURSORSTATE),
+COMPATIBLE_IOCTL(FBIOGET_CON2FBMAP),
+COMPATIBLE_IOCTL(FBIOPUT_CON2FBMAP),
+#if 0
+COMPATIBLE_IOCTL(FBIOBLANK),
+#endif
+/* Little f */
+COMPATIBLE_IOCTL(FIOCLEX),
+COMPATIBLE_IOCTL(FIONCLEX),
+COMPATIBLE_IOCTL(FIOASYNC),
+COMPATIBLE_IOCTL(FIONBIO),
+COMPATIBLE_IOCTL(FIONREAD), /* This is also TIOCINQ */
+/* 0x00 */
+COMPATIBLE_IOCTL(FIBMAP),
+COMPATIBLE_IOCTL(FIGETBSZ),
+/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
+ * Some need translations, these do not.
+ */
+COMPATIBLE_IOCTL(HDIO_GET_IDENTITY),
+COMPATIBLE_IOCTL(HDIO_SET_DMA),
+COMPATIBLE_IOCTL(HDIO_SET_KEEPSETTINGS),
+COMPATIBLE_IOCTL(HDIO_SET_UNMASKINTR),
+COMPATIBLE_IOCTL(HDIO_SET_NOWERR),
+COMPATIBLE_IOCTL(HDIO_SET_32BIT),
+COMPATIBLE_IOCTL(HDIO_SET_MULTCOUNT),
+COMPATIBLE_IOCTL(HDIO_DRIVE_CMD),
+COMPATIBLE_IOCTL(HDIO_SET_PIO_MODE),
+COMPATIBLE_IOCTL(HDIO_SCAN_HWIF),
+COMPATIBLE_IOCTL(HDIO_SET_NICE),
+/* 0x02 -- Floppy ioctls */
+COMPATIBLE_IOCTL(FDMSGON),
+COMPATIBLE_IOCTL(FDMSGOFF),
+COMPATIBLE_IOCTL(FDSETEMSGTRESH),
+COMPATIBLE_IOCTL(FDFLUSH),
+COMPATIBLE_IOCTL(FDWERRORCLR),
+COMPATIBLE_IOCTL(FDSETMAXERRS),
+COMPATIBLE_IOCTL(FDGETMAXERRS),
+COMPATIBLE_IOCTL(FDGETDRVTYP),
+COMPATIBLE_IOCTL(FDEJECT),
+COMPATIBLE_IOCTL(FDCLRPRM),
+COMPATIBLE_IOCTL(FDFMTBEG),
+COMPATIBLE_IOCTL(FDFMTEND),
+COMPATIBLE_IOCTL(FDRESET),
+COMPATIBLE_IOCTL(FDTWADDLE),
+COMPATIBLE_IOCTL(FDFMTTRK),
+COMPATIBLE_IOCTL(FDRAWCMD),
+/* 0x12 */
+COMPATIBLE_IOCTL(BLKROSET),
+COMPATIBLE_IOCTL(BLKROGET),
+COMPATIBLE_IOCTL(BLKRRPART),
+COMPATIBLE_IOCTL(BLKFLSBUF),
+COMPATIBLE_IOCTL(BLKRASET),
+COMPATIBLE_IOCTL(BLKFRASET),
+COMPATIBLE_IOCTL(BLKSECTSET),
+COMPATIBLE_IOCTL(BLKSSZGET),
+COMPATIBLE_IOCTL(BLKBSZGET),
+COMPATIBLE_IOCTL(BLKBSZSET),
+COMPATIBLE_IOCTL(BLKGETSIZE64),
+
+/* RAID */
+COMPATIBLE_IOCTL(RAID_VERSION),
+COMPATIBLE_IOCTL(GET_ARRAY_INFO),
+COMPATIBLE_IOCTL(GET_DISK_INFO),
+COMPATIBLE_IOCTL(PRINT_RAID_DEBUG),
+COMPATIBLE_IOCTL(CLEAR_ARRAY),
+COMPATIBLE_IOCTL(ADD_NEW_DISK),
+COMPATIBLE_IOCTL(HOT_REMOVE_DISK),
+COMPATIBLE_IOCTL(SET_ARRAY_INFO),
+COMPATIBLE_IOCTL(SET_DISK_INFO),
+COMPATIBLE_IOCTL(WRITE_RAID_INFO),
+COMPATIBLE_IOCTL(UNPROTECT_ARRAY),
+COMPATIBLE_IOCTL(PROTECT_ARRAY),
+COMPATIBLE_IOCTL(HOT_ADD_DISK),
+COMPATIBLE_IOCTL(SET_DISK_FAULTY),
+COMPATIBLE_IOCTL(RUN_ARRAY),
+COMPATIBLE_IOCTL(START_ARRAY),
+COMPATIBLE_IOCTL(STOP_ARRAY),
+COMPATIBLE_IOCTL(STOP_ARRAY_RO),
+COMPATIBLE_IOCTL(RESTART_ARRAY_RW),
+/* Big K */
+COMPATIBLE_IOCTL(PIO_FONT),
+COMPATIBLE_IOCTL(GIO_FONT),
+COMPATIBLE_IOCTL(KDSIGACCEPT),
+COMPATIBLE_IOCTL(KDGETKEYCODE),
+COMPATIBLE_IOCTL(KDSETKEYCODE),
+COMPATIBLE_IOCTL(KIOCSOUND),
+COMPATIBLE_IOCTL(KDMKTONE),
+COMPATIBLE_IOCTL(KDGKBTYPE),
+COMPATIBLE_IOCTL(KDSETMODE),
+COMPATIBLE_IOCTL(KDGETMODE),
+COMPATIBLE_IOCTL(KDSKBMODE),
+COMPATIBLE_IOCTL(KDGKBMODE),
+COMPATIBLE_IOCTL(KDSKBMETA),
+COMPATIBLE_IOCTL(KDGKBMETA),
+COMPATIBLE_IOCTL(KDGKBENT),
+COMPATIBLE_IOCTL(KDSKBENT),
+COMPATIBLE_IOCTL(KDGKBSENT),
+COMPATIBLE_IOCTL(KDSKBSENT),
+COMPATIBLE_IOCTL(KDGKBDIACR),
+COMPATIBLE_IOCTL(KDKBDREP),
+COMPATIBLE_IOCTL(KDSKBDIACR),
+COMPATIBLE_IOCTL(KDGKBLED),
+COMPATIBLE_IOCTL(KDSKBLED),
+COMPATIBLE_IOCTL(KDGETLED),
+COMPATIBLE_IOCTL(KDSETLED),
+COMPATIBLE_IOCTL(GIO_SCRNMAP),
+COMPATIBLE_IOCTL(PIO_SCRNMAP),
+COMPATIBLE_IOCTL(GIO_UNISCRNMAP),
+COMPATIBLE_IOCTL(PIO_UNISCRNMAP),
+COMPATIBLE_IOCTL(PIO_FONTRESET),
+COMPATIBLE_IOCTL(PIO_UNIMAPCLR),
+/* Big S */
+COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN),
+COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST),
+COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK),
+COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK),
+COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY),
+COMPATIBLE_IOCTL(SCSI_IOCTL_TAGGED_ENABLE),
+COMPATIBLE_IOCTL(SCSI_IOCTL_TAGGED_DISABLE),
+COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER),
+COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND),
+/* Big V */
+COMPATIBLE_IOCTL(VT_SETMODE),
+COMPATIBLE_IOCTL(VT_GETMODE),
+COMPATIBLE_IOCTL(VT_GETSTATE),
+COMPATIBLE_IOCTL(VT_OPENQRY),
+COMPATIBLE_IOCTL(VT_ACTIVATE),
+COMPATIBLE_IOCTL(VT_WAITACTIVE),
+COMPATIBLE_IOCTL(VT_RELDISP),
+COMPATIBLE_IOCTL(VT_DISALLOCATE),
+COMPATIBLE_IOCTL(VT_RESIZE),
+COMPATIBLE_IOCTL(VT_RESIZEX),
+COMPATIBLE_IOCTL(VT_LOCKSWITCH),
+COMPATIBLE_IOCTL(VT_UNLOCKSWITCH),
+/* Little v, the video4linux ioctls */
+COMPATIBLE_IOCTL(VIDIOCGCAP),
+COMPATIBLE_IOCTL(VIDIOCGCHAN),
+COMPATIBLE_IOCTL(VIDIOCSCHAN),
+COMPATIBLE_IOCTL(VIDIOCGPICT),
+COMPATIBLE_IOCTL(VIDIOCSPICT),
+COMPATIBLE_IOCTL(VIDIOCCAPTURE),
+COMPATIBLE_IOCTL(VIDIOCKEY),
+COMPATIBLE_IOCTL(VIDIOCGAUDIO),
+COMPATIBLE_IOCTL(VIDIOCSAUDIO),
+COMPATIBLE_IOCTL(VIDIOCSYNC),
+COMPATIBLE_IOCTL(VIDIOCMCAPTURE),
+COMPATIBLE_IOCTL(VIDIOCGMBUF),
+COMPATIBLE_IOCTL(VIDIOCGUNIT),
+COMPATIBLE_IOCTL(VIDIOCGCAPTURE),
+COMPATIBLE_IOCTL(VIDIOCSCAPTURE),
+/* BTTV specific... */
+COMPATIBLE_IOCTL(_IOW('v', BASE_VIDIOCPRIVATE+0, char [256])),
+COMPATIBLE_IOCTL(_IOR('v', BASE_VIDIOCPRIVATE+1, char [256])),
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+2, unsigned int)),
+COMPATIBLE_IOCTL(_IOW('v' , BASE_VIDIOCPRIVATE+3, char [16])), /* struct bttv_pll_info */
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+4, int)),
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+5, int)),
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+6, int)),
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+7, int)),
+/* Little p (/dev/rtc, /dev/envctrl, etc.) */
+COMPATIBLE_IOCTL(_IOR('p', 20, int[7])), /* RTCGET */
+COMPATIBLE_IOCTL(_IOW('p', 21, int[7])), /* RTCSET */
+COMPATIBLE_IOCTL(RTC_AIE_ON),
+COMPATIBLE_IOCTL(RTC_AIE_OFF),
+COMPATIBLE_IOCTL(RTC_UIE_ON),
+COMPATIBLE_IOCTL(RTC_UIE_OFF),
+COMPATIBLE_IOCTL(RTC_PIE_ON),
+COMPATIBLE_IOCTL(RTC_PIE_OFF),
+COMPATIBLE_IOCTL(RTC_WIE_ON),
+COMPATIBLE_IOCTL(RTC_WIE_OFF),
+COMPATIBLE_IOCTL(RTC_ALM_SET),
+COMPATIBLE_IOCTL(RTC_ALM_READ),
+COMPATIBLE_IOCTL(RTC_RD_TIME),
+COMPATIBLE_IOCTL(RTC_SET_TIME),
+COMPATIBLE_IOCTL(RTC_WKALM_SET),
+COMPATIBLE_IOCTL(RTC_WKALM_RD),
+/* Little m */
+COMPATIBLE_IOCTL(MTIOCTOP),
+/* Socket level stuff */
+COMPATIBLE_IOCTL(FIOSETOWN),
+COMPATIBLE_IOCTL(SIOCSPGRP),
+COMPATIBLE_IOCTL(FIOGETOWN),
+COMPATIBLE_IOCTL(SIOCGPGRP),
+COMPATIBLE_IOCTL(SIOCATMARK),
+COMPATIBLE_IOCTL(SIOCSIFLINK),
+COMPATIBLE_IOCTL(SIOCSIFENCAP),
+COMPATIBLE_IOCTL(SIOCGIFENCAP),
+COMPATIBLE_IOCTL(SIOCSIFBR),
+COMPATIBLE_IOCTL(SIOCGIFBR),
+COMPATIBLE_IOCTL(SIOCSARP),
+COMPATIBLE_IOCTL(SIOCGARP),
+COMPATIBLE_IOCTL(SIOCDARP),
+COMPATIBLE_IOCTL(SIOCSRARP),
+COMPATIBLE_IOCTL(SIOCGRARP),
+COMPATIBLE_IOCTL(SIOCDRARP),
+COMPATIBLE_IOCTL(SIOCADDDLCI),
+COMPATIBLE_IOCTL(SIOCDELDLCI),
+/* SG stuff */
+COMPATIBLE_IOCTL(SG_SET_TIMEOUT),
+COMPATIBLE_IOCTL(SG_GET_TIMEOUT),
+COMPATIBLE_IOCTL(SG_EMULATED_HOST),
+COMPATIBLE_IOCTL(SG_SET_TRANSFORM),
+COMPATIBLE_IOCTL(SG_GET_TRANSFORM),
+COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE),
+COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE),
+COMPATIBLE_IOCTL(SG_GET_SCSI_ID),
+COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA),
+COMPATIBLE_IOCTL(SG_GET_LOW_DMA),
+COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID),
+COMPATIBLE_IOCTL(SG_GET_PACK_ID),
+COMPATIBLE_IOCTL(SG_GET_NUM_WAITING),
+COMPATIBLE_IOCTL(SG_SET_DEBUG),
+COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE),
+COMPATIBLE_IOCTL(SG_GET_COMMAND_Q),
+COMPATIBLE_IOCTL(SG_SET_COMMAND_Q),
+COMPATIBLE_IOCTL(SG_GET_VERSION_NUM),
+COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN),
+COMPATIBLE_IOCTL(SG_SCSI_RESET),
+COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE),
+COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN),
+COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN),
+/* PPP stuff */
+COMPATIBLE_IOCTL(PPPIOCGFLAGS),
+COMPATIBLE_IOCTL(PPPIOCSFLAGS),
+COMPATIBLE_IOCTL(PPPIOCGASYNCMAP),
+COMPATIBLE_IOCTL(PPPIOCSASYNCMAP),
+COMPATIBLE_IOCTL(PPPIOCGUNIT),
+COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP),
+COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP),
+COMPATIBLE_IOCTL(PPPIOCGMRU),
+COMPATIBLE_IOCTL(PPPIOCSMRU),
+COMPATIBLE_IOCTL(PPPIOCSMAXCID),
+COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP),
+COMPATIBLE_IOCTL(LPGETSTATUS),
+COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP),
+COMPATIBLE_IOCTL(PPPIOCXFERUNIT),
+COMPATIBLE_IOCTL(PPPIOCGNPMODE),
+COMPATIBLE_IOCTL(PPPIOCSNPMODE),
+COMPATIBLE_IOCTL(PPPIOCGDEBUG),
+COMPATIBLE_IOCTL(PPPIOCSDEBUG),
+COMPATIBLE_IOCTL(PPPIOCNEWUNIT),
+COMPATIBLE_IOCTL(PPPIOCATTACH),
+COMPATIBLE_IOCTL(PPPIOCDETACH),
+COMPATIBLE_IOCTL(PPPIOCSMRRU),
+COMPATIBLE_IOCTL(PPPIOCCONNECT),
+COMPATIBLE_IOCTL(PPPIOCDISCONN),
+COMPATIBLE_IOCTL(PPPIOCATTCHAN),
+COMPATIBLE_IOCTL(PPPIOCGCHAN),
+/* PPPOX */
+COMPATIBLE_IOCTL(PPPOEIOCSFWD),
+COMPATIBLE_IOCTL(PPPOEIOCDFWD),
+/* CDROM stuff */
+COMPATIBLE_IOCTL(CDROMPAUSE),
+COMPATIBLE_IOCTL(CDROMRESUME),
+COMPATIBLE_IOCTL(CDROMPLAYMSF),
+COMPATIBLE_IOCTL(CDROMPLAYTRKIND),
+COMPATIBLE_IOCTL(CDROMREADTOCHDR),
+COMPATIBLE_IOCTL(CDROMREADTOCENTRY),
+COMPATIBLE_IOCTL(CDROMSTOP),
+COMPATIBLE_IOCTL(CDROMSTART),
+COMPATIBLE_IOCTL(CDROMEJECT),
+COMPATIBLE_IOCTL(CDROMVOLCTRL),
+COMPATIBLE_IOCTL(CDROMSUBCHNL),
+COMPATIBLE_IOCTL(CDROMEJECT_SW),
+COMPATIBLE_IOCTL(CDROMMULTISESSION),
+COMPATIBLE_IOCTL(CDROM_GET_MCN),
+COMPATIBLE_IOCTL(CDROMRESET),
+COMPATIBLE_IOCTL(CDROMVOLREAD),
+COMPATIBLE_IOCTL(CDROMSEEK),
+COMPATIBLE_IOCTL(CDROMPLAYBLK),
+COMPATIBLE_IOCTL(CDROMCLOSETRAY),
+COMPATIBLE_IOCTL(CDROM_SET_OPTIONS),
+COMPATIBLE_IOCTL(CDROM_CLEAR_OPTIONS),
+COMPATIBLE_IOCTL(CDROM_SELECT_SPEED),
+COMPATIBLE_IOCTL(CDROM_SELECT_DISC),
+COMPATIBLE_IOCTL(CDROM_MEDIA_CHANGED),
+COMPATIBLE_IOCTL(CDROM_DRIVE_STATUS),
+COMPATIBLE_IOCTL(CDROM_DISC_STATUS),
+COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS),
+COMPATIBLE_IOCTL(CDROM_LOCKDOOR),
+COMPATIBLE_IOCTL(CDROM_DEBUG),
+COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY),
+/* DVD ioctls */
+COMPATIBLE_IOCTL(DVD_READ_STRUCT),
+COMPATIBLE_IOCTL(DVD_WRITE_STRUCT),
+COMPATIBLE_IOCTL(DVD_AUTH),
+/* Big L */
+COMPATIBLE_IOCTL(LOOP_SET_FD),
+COMPATIBLE_IOCTL(LOOP_CLR_FD),
+/* Big Q for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC),
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE),
+COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS),
+COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD),
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL),
+COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND),
+COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME),
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID),
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL),
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE),
+/* Big T for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE),
+COMPATIBLE_IOCTL(SNDCTL_TMR_START),
+COMPATIBLE_IOCTL(SNDCTL_TMR_STOP),
+COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE),
+COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO),
+COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE),
+COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME),
+COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT),
+/* Little m for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME),
+COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE),
+COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD),
+/* Big P for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_DSP_RESET),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED),
+COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE),
+COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS),
+COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER),
+COMPATIBLE_IOCTL(SNDCTL_DSP_POST),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE),
+COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR),
+/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
+/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO),
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX),
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY),
+COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE),
+COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE),
+COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS),
+COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS),
+COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER),
+/* Big C for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_COPR_RESET),
+COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD),
+COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA),
+COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE),
+COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA),
+COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE),
+COMPATIBLE_IOCTL(SNDCTL_COPR_RUN),
+COMPATIBLE_IOCTL(SNDCTL_COPR_HALT),
+COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG),
+COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG),
+/* Big M for sound/OSS */
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO)),
+COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR)),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE),
+/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
+/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS),
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO)),
+COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR)),
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE),
+/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
+/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC),
+COMPATIBLE_IOCTL(SOUND_MIXER_INFO),
+COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO),
+COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS),
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1),
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2),
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3),
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4),
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5),
+COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS),
+COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS),
+COMPATIBLE_IOCTL(OSS_GETVERSION),
+/* AUTOFS */
+COMPATIBLE_IOCTL(AUTOFS_IOC_READY),
+COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL),
+COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC),
+COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER),
+COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE),
+/* DEVFS */
+COMPATIBLE_IOCTL(DEVFSDIOC_GET_PROTO_REV),
+COMPATIBLE_IOCTL(DEVFSDIOC_SET_EVENT_MASK),
+COMPATIBLE_IOCTL(DEVFSDIOC_RELEASE_EVENT_QUEUE),
+COMPATIBLE_IOCTL(DEVFSDIOC_SET_DEBUG_MASK),
+/* Raw devices */
+COMPATIBLE_IOCTL(RAW_SETBIND),
+COMPATIBLE_IOCTL(RAW_GETBIND),
+/* SMB ioctls which do not need any translations */
+COMPATIBLE_IOCTL(SMB_IOC_NEWCONN),
+/* Little a */
+COMPATIBLE_IOCTL(ATMSIGD_CTRL),
+COMPATIBLE_IOCTL(ATMARPD_CTRL),
+COMPATIBLE_IOCTL(ATMLEC_CTRL),
+COMPATIBLE_IOCTL(ATMLEC_MCAST),
+COMPATIBLE_IOCTL(ATMLEC_DATA),
+COMPATIBLE_IOCTL(ATM_SETSC),
+COMPATIBLE_IOCTL(SIOCSIFATMTCP),
+COMPATIBLE_IOCTL(SIOCMKCLIP),
+COMPATIBLE_IOCTL(ATMARP_MKIP),
+COMPATIBLE_IOCTL(ATMARP_SETENTRY),
+COMPATIBLE_IOCTL(ATMARP_ENCAP),
+COMPATIBLE_IOCTL(ATMTCP_CREATE),
+COMPATIBLE_IOCTL(ATMTCP_REMOVE),
+COMPATIBLE_IOCTL(ATMMPC_CTRL),
+COMPATIBLE_IOCTL(ATMMPC_DATA),
+#if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+/* 0xfe - lvm */
+COMPATIBLE_IOCTL(VG_SET_EXTENDABLE),
+COMPATIBLE_IOCTL(VG_STATUS_GET_COUNT),
+COMPATIBLE_IOCTL(VG_STATUS_GET_NAMELIST),
+COMPATIBLE_IOCTL(VG_REMOVE),
+COMPATIBLE_IOCTL(VG_RENAME),
+COMPATIBLE_IOCTL(VG_REDUCE),
+COMPATIBLE_IOCTL(PE_LOCK_UNLOCK),
+COMPATIBLE_IOCTL(PV_FLUSH),
+COMPATIBLE_IOCTL(LVM_LOCK_LVM),
+COMPATIBLE_IOCTL(LVM_GET_IOP_VERSION),
+#ifdef LVM_TOTAL_RESET
+COMPATIBLE_IOCTL(LVM_RESET),
+#endif
+COMPATIBLE_IOCTL(LV_SET_ACCESS),
+COMPATIBLE_IOCTL(LV_SET_STATUS),
+COMPATIBLE_IOCTL(LV_SET_ALLOCATION),
+COMPATIBLE_IOCTL(LE_REMAP),
+COMPATIBLE_IOCTL(LV_BMAP),
+COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE),
+#endif /* LVM */
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC),
+COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID),
+COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC),
+COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK),
+COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK),
+COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL),
+COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS),
+COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS),
+COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX),
+COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX),
+COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX),
+COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX),
+COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX),
+COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX),
+COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW),
+COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW),
+COMPATIBLE_IOCTL(DRM_IOCTL_LOCK),
+COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK),
+COMPATIBLE_IOCTL(DRM_IOCTL_FINISH),
+#endif /* DRM */
+/* elevator */
+COMPATIBLE_IOCTL(BLKELVGET),
+COMPATIBLE_IOCTL(BLKELVSET),
+/* Big W */
+/* WIOC_GETSUPPORT not yet implemented -E */
+COMPATIBLE_IOCTL(WDIOC_GETSTATUS),
+COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS),
+COMPATIBLE_IOCTL(WDIOC_GETTEMP),
+COMPATIBLE_IOCTL(WDIOC_SETOPTIONS),
+COMPATIBLE_IOCTL(WDIOC_KEEPALIVE),
+/* Bluetooth ioctls */
+COMPATIBLE_IOCTL(HCIDEVUP),
+COMPATIBLE_IOCTL(HCIDEVDOWN),
+COMPATIBLE_IOCTL(HCIDEVRESET),
+COMPATIBLE_IOCTL(HCIRESETSTAT),
+COMPATIBLE_IOCTL(HCIGETINFO),
+COMPATIBLE_IOCTL(HCIGETDEVLIST),
+COMPATIBLE_IOCTL(HCISETRAW),
+COMPATIBLE_IOCTL(HCISETSCAN),
+COMPATIBLE_IOCTL(HCISETAUTH),
+COMPATIBLE_IOCTL(HCIINQUIRY),
+COMPATIBLE_IOCTL(PCIIOC_CONTROLLER),
+COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO),
+COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM),
+COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE),
+/* USB */
+COMPATIBLE_IOCTL(USBDEVFS_RESETEP),
+COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE),
+COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION),
+COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER),
+COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB),
+COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE),
+COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE),
+COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO),
+COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO),
+COMPATIBLE_IOCTL(USBDEVFS_RESET),
+COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT),
+/* MTD */
+COMPATIBLE_IOCTL(MEMGETINFO),
+COMPATIBLE_IOCTL(MEMERASE),
+COMPATIBLE_IOCTL(MEMLOCK),
+COMPATIBLE_IOCTL(MEMUNLOCK),
+COMPATIBLE_IOCTL(MEMGETREGIONCOUNT),
+COMPATIBLE_IOCTL(MEMGETREGIONINFO),
+/* NBD */
+COMPATIBLE_IOCTL(NBD_SET_SOCK),
+COMPATIBLE_IOCTL(NBD_SET_BLKSIZE),
+COMPATIBLE_IOCTL(NBD_SET_SIZE),
+COMPATIBLE_IOCTL(NBD_DO_IT),
+COMPATIBLE_IOCTL(NBD_CLEAR_SOCK),
+COMPATIBLE_IOCTL(NBD_CLEAR_QUE),
+COMPATIBLE_IOCTL(NBD_PRINT_DEBUG),
+COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS),
+COMPATIBLE_IOCTL(NBD_DISCONNECT),
+/* Remove *PRIVATE in 2.5 */
+COMPATIBLE_IOCTL(SIOCDEVPRIVATE),
+COMPATIBLE_IOCTL(SIOCDEVPRIVATE+1),
+COMPATIBLE_IOCTL(SIOCDEVPRIVATE+2),
+COMPATIBLE_IOCTL(SIOCGMIIPHY),
+COMPATIBLE_IOCTL(SIOCGMIIREG),
+COMPATIBLE_IOCTL(SIOCSMIIREG),
+/* And these ioctls need translation */
+HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob),
+HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob),
+#ifdef CONFIG_NET
+HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32),
+#endif
+HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf),
+HANDLE_IOCTL(SIOCGIFFLAGS, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFFLAGS, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFMETRIC, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFMETRIC, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFMTU, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFMTU, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFMEM, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFMEM, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFHWADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFHWADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCADDMULTI, dev_ifsioc),
+HANDLE_IOCTL(SIOCDELMULTI, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFINDEX, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFBRDADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFBRDADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFDSTADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFDSTADDR, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFNETMASK, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFNETMASK, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFPFLAGS, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc),
+HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc),
+HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc),
+HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl),
+HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl),
+HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl),
+HANDLE_IOCTL(SIOCBONDSETHWADDR, bond_ioctl),
+HANDLE_IOCTL(SIOCBONDSLAVEINFOQUERY, bond_ioctl),
+HANDLE_IOCTL(SIOCBONDINFOQUERY, bond_ioctl),
+HANDLE_IOCTL(SIOCBONDCHANGEACTIVE, bond_ioctl),
+HANDLE_IOCTL(SIOCADDRT, routing_ioctl),
+HANDLE_IOCTL(SIOCDELRT, routing_ioctl),
+/* Note SIOCRTMSG is no longer, so this is safe and
+ * the user would have seen just an -EINVAL anyways. */
+HANDLE_IOCTL(SIOCRTMSG, ret_einval),
+HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp),
+HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo),
+HANDLE_IOCTL(BLKRAGET, w_long),
+HANDLE_IOCTL(BLKGETSIZE, w_long),
+HANDLE_IOCTL(0x1260, broken_blkgetsize),
+HANDLE_IOCTL(BLKFRAGET, w_long),
+HANDLE_IOCTL(BLKSECTGET, w_long),
+HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans),
+HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans),
+HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans),
+HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans),
+HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans),
+HANDLE_IOCTL(FDSETDRVPRM32, fd_ioctl_trans),
+HANDLE_IOCTL(FDGETDRVPRM32, fd_ioctl_trans),
+HANDLE_IOCTL(FDGETDRVSTAT32, fd_ioctl_trans),
+HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans),
+HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans),
+HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans),
+HANDLE_IOCTL(PPPIOCGIDLE32, ppp_ioctl_trans),
+HANDLE_IOCTL(PPPIOCSCOMPRESS32, ppp_ioctl_trans),
+HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans),
+HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans),
+HANDLE_IOCTL(MTIOCGETCONFIG32, mt_ioctl_trans),
+HANDLE_IOCTL(MTIOCSETCONFIG32, mt_ioctl_trans),
+HANDLE_IOCTL(CDROMREADMODE2, cdrom_ioctl_trans),
+HANDLE_IOCTL(CDROMREADMODE1, cdrom_ioctl_trans),
+HANDLE_IOCTL(CDROMREADRAW, cdrom_ioctl_trans),
+HANDLE_IOCTL(CDROMREADCOOKED, cdrom_ioctl_trans),
+HANDLE_IOCTL(CDROMREADAUDIO, cdrom_ioctl_trans),
+HANDLE_IOCTL(CDROMREADALL, cdrom_ioctl_trans),
+HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans),
+HANDLE_IOCTL(LOOP_SET_STATUS, loop_status),
+HANDLE_IOCTL(LOOP_GET_STATUS, loop_status),
+HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout),
+#ifdef CONFIG_VT
+HANDLE_IOCTL(PIO_FONTX, do_fontx_ioctl),
+HANDLE_IOCTL(GIO_FONTX, do_fontx_ioctl),
+HANDLE_IOCTL(PIO_UNIMAP, do_unimap_ioctl),
+HANDLE_IOCTL(GIO_UNIMAP, do_unimap_ioctl),
+HANDLE_IOCTL(KDFONTOP, do_kdfontop_ioctl),
+HANDLE_IOCTL(FBIOGET_FSCREENINFO, do_fbioget_fscreeninfo_ioctl),
+HANDLE_IOCTL(FBIOGETCMAP, do_fbiogetcmap_ioctl),
+HANDLE_IOCTL(FBIOPUTCMAP, do_fbioputcmap_ioctl),
+#endif
+HANDLE_IOCTL(EXT2_IOC32_GETFLAGS, do_ext2_ioctl),
+HANDLE_IOCTL(EXT2_IOC32_SETFLAGS, do_ext2_ioctl),
+HANDLE_IOCTL(EXT2_IOC32_GETVERSION, do_ext2_ioctl),
+HANDLE_IOCTL(EXT2_IOC32_SETVERSION, do_ext2_ioctl),
+HANDLE_IOCTL(VIDIOCGTUNER32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCSTUNER32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCGWIN32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCSWIN32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCGFBUF32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCSFBUF32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCGFREQ32, do_video_ioctl),
+HANDLE_IOCTL(VIDIOCSFREQ32, do_video_ioctl),
+/* One SMB ioctl needs translations. */
+HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid),
+HANDLE_IOCTL(ATM_GETLINKRATE32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETNAMES32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETTYPE32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETESI32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETADDR32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_RSTADDR32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_ADDADDR32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_DELADDR32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETCIRANGE32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_SETCIRANGE32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_SETESI32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_SETESIF32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETSTAT32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETSTATZ32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_GETLOOP32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_SETLOOP32, do_atm_ioctl),
+HANDLE_IOCTL(ATM_QUERYLOOP32, do_atm_ioctl),
+HANDLE_IOCTL(SONET_GETSTAT, do_atm_ioctl),
+HANDLE_IOCTL(SONET_GETSTATZ, do_atm_ioctl),
+HANDLE_IOCTL(SONET_GETDIAG, do_atm_ioctl),
+HANDLE_IOCTL(SONET_SETDIAG, do_atm_ioctl),
+HANDLE_IOCTL(SONET_CLRDIAG, do_atm_ioctl),
+HANDLE_IOCTL(SONET_SETFRAMING, do_atm_ioctl),
+HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl),
+HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl),
+#if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+HANDLE_IOCTL(VG_STATUS, do_lvm_ioctl),
+HANDLE_IOCTL(VG_CREATE_OLD, do_lvm_ioctl),
+HANDLE_IOCTL(VG_CREATE, do_lvm_ioctl),
+HANDLE_IOCTL(VG_EXTEND, do_lvm_ioctl),
+HANDLE_IOCTL(LV_CREATE, do_lvm_ioctl),
+HANDLE_IOCTL(LV_REMOVE, do_lvm_ioctl),
+HANDLE_IOCTL(LV_EXTEND, do_lvm_ioctl),
+HANDLE_IOCTL(LV_REDUCE, do_lvm_ioctl),
+HANDLE_IOCTL(LV_RENAME, do_lvm_ioctl),
+HANDLE_IOCTL(LV_STATUS_BYNAME, do_lvm_ioctl),
+HANDLE_IOCTL(LV_STATUS_BYINDEX, do_lvm_ioctl),
+HANDLE_IOCTL(LV_STATUS_BYDEV, do_lvm_ioctl),
+HANDLE_IOCTL(PV_CHANGE, do_lvm_ioctl),
+HANDLE_IOCTL(PV_STATUS, do_lvm_ioctl),
+#endif /* LVM */
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version),
+HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique),
+HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique),
+HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap),
+HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs),
+HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs),
+HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs),
+HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma),
+HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx),
+#endif /* DRM */
+#if 0
+HANDLE_IOCTL(USBDEVFS_CONTROL32, do_usbdevfs_control),
+HANDLE_IOCTL(USBDEVFS_BULK32, do_usbdevfs_bulk),
+/*HANDLE_IOCTL(USBDEVFS_SUBMITURB32, do_usbdevfs_urb)*/
+HANDLE_IOCTL(USBDEVFS_REAPURB32, do_usbdevfs_reapurb),
+HANDLE_IOCTL(USBDEVFS_REAPURBNDELAY32, do_usbdevfs_reapurb),
+HANDLE_IOCTL(USBDEVFS_DISCSIGNAL32, do_usbdevfs_discsignal),
+#endif
+HANDLE_IOCTL(SG_IO, do_sg_io),
+};
+
+/*
+ * Hash table of ioctl translation entries.  Each slot holds a
+ * (struct ioctl_trans *) cast to unsigned long, chained through
+ * ioctl_trans->next.
+ */
+unsigned long ioctl32_hash_table[1024];
+
+/* Fold a 32-bit ioctl command number into a table slot (0..1023). */
+static inline unsigned long ioctl32_hash(unsigned long cmd)
+{
+ return ((cmd >> 6) ^ (cmd >> 4) ^ cmd) & 0x3ff;
+}
+
+/*
+ * Link @trans into the hash chain for its command number, appending at
+ * the tail if the slot is already occupied.  Callers must serialize
+ * (comment below says: kernel lock held).
+ */
+static void ioctl32_insert_translation(struct ioctl_trans *trans)
+{
+ unsigned long hash;
+ struct ioctl_trans *t;
+
+ hash = ioctl32_hash (trans->cmd);
+ if (!ioctl32_hash_table[hash])
+ ioctl32_hash_table[hash] = (long)trans;
+ else {
+ /* Walk to the tail of the chain and link the new entry there. */
+ t = (struct ioctl_trans *)ioctl32_hash_table[hash];
+ while (t->next)
+ t = (struct ioctl_trans *)(long)t->next;
+ trans->next = 0;
+ t->next = (long)trans;
+ }
+}
+
+/*
+ * Boot-time initcall: populate the hash table from the static
+ * ioctl_translations[] array.  Always returns 0.
+ */
+static int __init init_sys32_ioctl(void)
+{
+ int i, size = sizeof(ioctl_translations) / sizeof(struct ioctl_trans);
+ for (i=0; i < size ;i++)
+ ioctl32_insert_translation(&ioctl_translations[i]);
+ return 0;
+}
+
+__initcall(init_sys32_ioctl);
+
+/* One page of dynamically registered translations (e.g. from modules). */
+static struct ioctl_trans *additional_ioctls;
+
+/* Always call these with kernel lock held! */
+
+/*
+ * Register a 32-bit ioctl conversion for @cmd.  A NULL @handler marks
+ * the command as compatible: it is forwarded straight to sys_ioctl().
+ * Returns 0 on success, -ENOMEM if the page cannot be allocated or is
+ * full (capacity: PAGE_SIZE/sizeof(struct ioctl_trans) entries).
+ */
+int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *))
+{
+ int i;
+ if (!additional_ioctls) {
+ additional_ioctls = module_map(PAGE_SIZE);
+ if (!additional_ioctls)
+ return -ENOMEM;
+ memset(additional_ioctls, 0, PAGE_SIZE);
+ }
+ /* Find a free entry; cmd == 0 marks a slot as unused. */
+ for (i = 0; i < PAGE_SIZE/sizeof(struct ioctl_trans); i++)
+ if (!additional_ioctls[i].cmd)
+ break;
+ if (i == PAGE_SIZE/sizeof(struct ioctl_trans))
+ return -ENOMEM;
+ additional_ioctls[i].cmd = cmd;
+ if (!handler)
+ additional_ioctls[i].handler = (long)sys_ioctl;
+ else
+ additional_ioctls[i].handler = (long)handler;
+ ioctl32_insert_translation(&additional_ioctls[i]);
+ return 0;
+}
+
+/*
+ * Remove a dynamically registered conversion for @cmd.  Only entries
+ * that live inside the additional_ioctls page may be removed (the
+ * range check below); static table entries are left alone.  Returns 0
+ * on success, -EINVAL if no matching dynamic entry is found.
+ * Caller must hold the kernel lock (see comment above).
+ */
+int unregister_ioctl32_conversion(unsigned int cmd)
+{
+ unsigned long hash = ioctl32_hash(cmd);
+ struct ioctl_trans *t, *t1;
+
+ t = (struct ioctl_trans *)ioctl32_hash_table[hash];
+ if (!t) return -EINVAL;
+ if (t->cmd == cmd && t >= additional_ioctls &&
+ (unsigned long)t < ((unsigned long)additional_ioctls) + PAGE_SIZE) {
+ /* Chain head matches: unlink it and free the slot (cmd = 0). */
+ ioctl32_hash_table[hash] = t->next;
+ t->cmd = 0;
+ return 0;
+ } else while (t->next) {
+ t1 = (struct ioctl_trans *)t->next;
+ if (t1->cmd == cmd && t1 >= additional_ioctls &&
+ (unsigned long)t1 < ((unsigned long)additional_ioctls) + PAGE_SIZE) {
+ t1->cmd = 0;
+ t->next = t1->next;
+ return 0;
+ }
+ t = t1;
+ }
+ return -EINVAL;
+}
+
+/*
+ * 32-bit ioctl(2) entry point.  Looks the command up in the hash
+ * table and dispatches to the registered handler (which for
+ * COMPATIBLE_IOCTL entries is sys_ioctl itself).  Unknown commands
+ * fail with -EINVAL after logging at most 20 warnings.
+ */
+asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct file * filp;
+ int error = -EBADF;
+ int (*handler)(unsigned int, unsigned int, unsigned long, struct file * filp);
+ struct ioctl_trans *t;
+
+ filp = fget(fd);
+ if (!filp)
+ goto out2;
+
+ /* No ioctl method: let sys_ioctl produce the generic behaviour. */
+ if (!filp->f_op || !filp->f_op->ioctl) {
+ error = sys_ioctl (fd, cmd, arg);
+ goto out;
+ }
+
+ t = (struct ioctl_trans *)ioctl32_hash_table [ioctl32_hash (cmd)];
+
+ while (t && t->cmd != cmd)
+ t = (struct ioctl_trans *)t->next;
+ if (t) {
+ handler = (void *)t->handler;
+ error = handler(fd, cmd, arg, filp);
+ } else {
+ /* Rate-limit noise about unconverted ioctls. */
+ static int count = 0;
+ if (++count <= 20)
+ printk("sys32_ioctl(%s:%d): Unknown cmd fd(%d) "
+ "cmd(%08x) arg(%08x)\n",
+ current->comm, current->pid,
+ (int)fd, (unsigned int)cmd, (unsigned int)arg);
+ error = -EINVAL;
+ }
+out:
+ fput(filp);
+out2:
+ return error;
+}
--- /dev/null
+/*
+ *
+ *
+ * arch/ppc/kernel/irq.c
+ *
+ * Derived from arch/i386/kernel/irq.c
+ * Copyright (C) 1992 Linus Torvalds
+ * Adapted from arch/i386 by Gary Thomas
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Cort Dougan
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/threads.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/random.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/cache.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/machdep.h>
+
+#include "local_irq.h"
+
+/* Forward declarations for the enable/disable API defined below. */
+void enable_irq(unsigned int irq_nr);
+void disable_irq(unsigned int irq_nr);
+
+#ifdef CONFIG_SMP
+extern void iSeries_smp_message_recv( struct pt_regs * );
+#endif
+
+volatile unsigned char *chrp_int_ack_special;
+static void register_irq_proc (unsigned int irq);
+
+/* Per-irq descriptors: depth 0, no handler/action, status 0, unlocked. */
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
+
+int ppc_spurious_interrupts = 0;
+struct irqaction *ppc_irq_action[NR_IRQS];
+unsigned long lpEvent_count = 0;
+#ifdef CONFIG_XMON
+extern void xmon(struct pt_regs *regs);
+extern int xmon_bpt(struct pt_regs *regs);
+extern int xmon_sstep(struct pt_regs *regs);
+extern int xmon_iabr_match(struct pt_regs *regs);
+extern int xmon_dabr_match(struct pt_regs *regs);
+extern void (*xmon_fault_handler)(struct pt_regs *regs);
+#endif
+/*
+ * NOTE(review): this second block is also guarded by CONFIG_XMON; the
+ * generic debugger_* hooks look like they were meant for a broader
+ * debugger config (e.g. XMON || KGDB) -- confirm against Kconfig.
+ */
+#ifdef CONFIG_XMON
+extern void (*debugger)(struct pt_regs *regs);
+extern int (*debugger_bpt)(struct pt_regs *regs);
+extern int (*debugger_sstep)(struct pt_regs *regs);
+extern int (*debugger_iabr_match)(struct pt_regs *regs);
+extern int (*debugger_dabr_match)(struct pt_regs *regs);
+extern void (*debugger_fault_handler)(struct pt_regs *regs);
+#endif
+
+/* nasty hack for shared irq's since we need to do kmalloc calls but
+ * can't very early in the boot when we need to do a request irq.
+ * this needs to be removed.
+ * -- Cort
+ */
+#define IRQ_KMALLOC_ENTRIES 8
+static int cache_bitmask = 0;
+static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
+extern int mem_init_done;
+
+/*
+ * kmalloc() substitute usable before the allocator is up: falls back
+ * to a small static pool of irqaction-sized slots tracked by
+ * cache_bitmask.  Returns 0 (NULL) when the pool is exhausted.
+ */
+void *irq_kmalloc(size_t size, int pri)
+{
+ unsigned int i;
+ if ( mem_init_done )
+ return kmalloc(size,pri);
+ for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
+ if ( ! ( cache_bitmask & (1<<i) ) ) {
+ cache_bitmask |= (1<<i);
+ return (void *)(&malloc_cache[i]);
+ }
+ return 0;
+}
+
+/*
+ * Counterpart to irq_kmalloc(): release a static-pool slot if @ptr
+ * came from malloc_cache, otherwise hand it to kfree().
+ */
+void irq_kfree(void *ptr)
+{
+ unsigned int i;
+ for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
+ if ( ptr == &malloc_cache[i] ) {
+ cache_bitmask &= ~(1<<i);
+ return;
+ }
+ kfree(ptr);
+}
+
+/*
+ * Attach @new to @irq's action list.  Fails with -EBUSY when the irq
+ * is already in use and either side did not set SA_SHIRQ.  On the
+ * first (non-shared) handler the line is unmasked and marked enabled.
+ * Registers the /proc/irq entry on success.
+ */
+int
+setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ unsigned long flags;
+ struct irqaction *old, **p;
+ irq_desc_t *desc = irq_desc + irq;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is attempted to be loaded, without actually
+ * installing a new handler, but is this really a problem,
+ * only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ /* First handler: reset disable depth and enable the line. */
+ desc->depth = 0;
+ desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
+ unmask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ register_irq_proc(irq);
+ return 0;
+}
+
+/* This could be promoted to a real free_irq() ... */
+/*
+ * Remove the action whose dev_id matches @dev_id from @irq's list.
+ * Masks and disables the line when the last action goes away.  On
+ * SMP, spins until any in-flight handler on another CPU finishes
+ * before freeing the action.  Returns -ENOENT if no action matched.
+ */
+static int
+do_free_irq(int irq, void* dev_id)
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+#ifdef CONFIG_SMP
+ /* Wait to make sure it's not being used on another CPU */
+ while (desc->status & IRQ_INPROGRESS)
+ barrier();
+#endif
+ irq_kfree(action);
+ return 0;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ break;
+ }
+ return -ENOENT;
+}
+
+/*
+ * Install @handler for @irq.  A NULL @handler means "free the
+ * registration matching @dev_id" (see do_free_irq()).
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range irq, -ENOMEM if
+ * the irqaction cannot be allocated, or the setup_irq() error
+ * (e.g. -EBUSY when sharing is refused).
+ */
+int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id)
+{
+ struct irqaction *action;
+ int retval;
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ /* We could implement really free_irq() instead of that... */
+ return do_free_irq(irq, dev_id);
+
+ action = (struct irqaction *)
+ irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action) {
+ printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
+ return -ENOMEM;
+ }
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->dev_id = dev_id;
+ action->next = NULL;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+
+ /*
+ * Propagate the setup_irq() result.  The original returned 0
+ * unconditionally, hiding failures such as -EBUSY from callers
+ * while the action had already been freed.
+ */
+ return retval;
+}
+
+/*
+ * Free the handler registered for (@irq, @dev_id): implemented via
+ * request_irq()'s NULL-handler path, which calls do_free_irq().
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ request_irq(irq, NULL, 0, NULL, dev_id);
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables of an interrupt
+ * stack. Unlike disable_irq(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ /* Only the first disable in the nest actually masks the line. */
+ if (!desc->depth++) {
+ if (!(desc->status & IRQ_PER_CPU))
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables of an interrupt
+ * stack. That is for two disables you need two enables. This
+ * function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+
+void disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+
+ /*
+ * Only spin for in-progress handlers when not already in irq
+ * context, otherwise we could deadlock waiting on ourselves.
+ */
+ if (!local_irq_count(smp_processor_id())) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
+}
+
+/**
+ * enable_irq - enable interrupt handling on an irq
+ * @irq: Interrupt to enable
+ *
+ * Re-enables the processing of interrupts on this IRQ line
+ * providing no disable_irq calls are now in effect.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
+ case 1: {
+ /* Last disable being undone: clear DISABLED, replay any
+ * interrupt that arrived while masked, then unmask. */
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ unmask_irq(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk("enable_irq(%u) unbalanced\n", irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * seq_file backend for /proc/interrupts: one row per irq with an
+ * action installed, per-CPU counts, controller name, trigger type,
+ * and the action names; a final BAD line with the spurious count.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i, j;
+ struct irqaction * action;
+
+ seq_printf(p, " ");
+ for (j=0; j<smp_num_cpus; j++)
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = irq_desc[i].action;
+ if (!action || !action->handler)
+ continue;
+ seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+ for (j = 0; j < smp_num_cpus; j++)
+ seq_printf(p, "%10u ",
+ kstat.irqs[cpu_logical_map(j)][i]);
+#else
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#endif /* CONFIG_SMP */
+ if (irq_desc[i].handler)
+ seq_printf(p, " %s ", irq_desc[i].handler->typename );
+ else
+ seq_printf(p, " None ");
+ seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
+ seq_printf(p, " %s",action->name);
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+ seq_putc(p, '\n');
+ }
+ seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
+ return 0;
+}
+
+/*
+ * Run every action chained on @irq.  Interrupts are re-enabled for
+ * the duration unless some action set SA_INTERRUPT, and are disabled
+ * again before returning to the dispatch loop.
+ */
+static inline void
+handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
+{
+ int status = 0;
+
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ /* Feed the entropy pool if any handler asked for it. */
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+}
+
+/*
+ * Eventually, this should take an array of interrupts and an array size
+ * so it can dispatch multiple interrupts.
+ */
+/*
+ * Core dispatch for one hardware interrupt: ack, run the handlers
+ * (re-running while IRQ_PENDING accumulates from other CPUs), then
+ * eoi via the controller's end()/enable() hook.
+ */
+void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
+{
+ int status;
+ struct irqaction *action;
+ int cpu = smp_processor_id();
+ irq_desc_t *desc = irq_desc + irq;
+
+ kstat.irqs[cpu][irq]++;
+ spin_lock(&desc->lock);
+ ack_irq(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ if (!(status & IRQ_PER_CPU))
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ if (!action || !action->handler) {
+ ppc_spurious_interrupts++;
+ printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
+ /* We can't call disable_irq here, it would deadlock */
+ if (!desc->depth)
+ desc->depth = 1;
+ desc->status |= IRQ_DISABLED;
+ /* This is not a real spurious interrupt, we
+ * have to eoi it, so we jump to out
+ */
+ mask_irq(irq);
+ goto out;
+ }
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ if (!(status & IRQ_PER_CPU))
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ Since we set PENDING, if another processor is handling
+ a different instance of this same irq, the other processor
+ will take care of it.
+ */
+ if (!action)
+ goto out;
+
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ handle_irq_event(irq, regs, action);
+ spin_lock(&desc->lock);
+
+ if (!(desc->status & IRQ_PENDING))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ if (irq_desc[irq].handler) {
+ if (irq_desc[irq].handler->end)
+ irq_desc[irq].handler->end(irq);
+ else if (irq_desc[irq].handler->enable)
+ irq_desc[irq].handler->enable(irq);
+ }
+ spin_unlock(&desc->lock);
+}
+
+/*
+ * Top-level interrupt entry.  On non-iSeries machines the platform's
+ * get_irq() identifies the source and ppc_irq_dispatch_handler() runs
+ * it; on iSeries partitions pending LP events (and SMP IPIs) are
+ * processed from the LP queue instead.  Runs softirqs before return.
+ *
+ * NOTE(review): 'paca' is only assigned on the iSeries path; the
+ * post-irq_exit use below is guarded by the same _MACH_iSeries test,
+ * so it is never read uninitialized, but a compiler may warn.
+ */
+int do_IRQ(struct pt_regs *regs, int isfake)
+{
+ int cpu = smp_processor_id();
+ int irq;
+ struct Paca * paca;
+ struct ItLpQueue * lpq;
+
+ /* if(cpu) udbg_printf("Entering do_IRQ\n"); */
+
+ irq_enter(cpu);
+
+ if ( _machine != _MACH_iSeries ) {
+
+ /* every arch is required to have a get_irq -- Cort */
+ irq = ppc_md.get_irq( regs );
+
+ if ( irq >= 0 ) {
+ ppc_irq_dispatch_handler( regs, irq );
+ if (ppc_md.post_irq)
+ ppc_md.post_irq( regs, irq );
+ } else {
+ /* -2 means ignore, already handled */
+ if (irq != -2) {
+ printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
+ irq, regs->nip);
+ ppc_spurious_interrupts++;
+ }
+ }
+ }
+ /* if on iSeries partition */
+ else {
+ paca = (struct Paca *)mfspr(SPRG3);
+#ifdef CONFIG_SMP
+ if ( paca->xLpPaca.xIntDword.xFields.xIpiCnt ) {
+ paca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
+ iSeries_smp_message_recv( regs );
+ }
+#endif /* CONFIG_SMP */
+ lpq = paca->lpQueuePtr;
+ if ( lpq && ItLpQueue_isLpIntPending( lpq ) )
+ lpEvent_count += ItLpQueue_process( lpq, regs );
+ }
+
+ irq_exit(cpu);
+
+ if ( _machine == _MACH_iSeries ) {
+ if ( paca->xLpPaca.xIntDword.xFields.xDecrInt ) {
+ paca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
+ /* Signal a fake decrementer interrupt */
+ timer_interrupt( regs );
+ }
+ }
+
+ if (softirq_pending(cpu))
+ do_softirq();
+
+ return 1; /* lets ret_from_int know we can do checks */
+}
+
+/*
+ * IRQ autoprobing is not supported on this platform; the three
+ * probe_irq_* entry points are stubs that report "nothing found".
+ */
+unsigned long probe_irq_on (void)
+{
+ return 0;
+}
+
+int probe_irq_off (unsigned long irqs)
+{
+ return 0;
+}
+
+unsigned int probe_irq_mask(unsigned long irqs)
+{
+ return 0;
+}
+
+/*
+ * One-shot interrupt controller setup: delegates to the platform's
+ * init_IRQ (and optional RAS init).  The static guard makes repeat
+ * calls harmless.
+ */
+void __init init_IRQ(void)
+{
+ static int once = 0;
+
+ if ( once )
+ return;
+ else
+ once++;
+
+ ppc_md.init_IRQ();
+ if(ppc_md.init_ras_IRQ) ppc_md.init_ras_IRQ();
+}
+
+#ifdef CONFIG_SMP
+/* CPU currently holding the global irq lock; NO_PROC_ID when free. */
+unsigned char global_irq_holder = NO_PROC_ID;
+
+/* Debug dump printed when get_irqlock() spins too long. */
+static void show(char * str)
+{
+ int cpu = smp_processor_id();
+ int i;
+
+ printk("\n%s, CPU %d:\n", str, cpu);
+ printk("irq: %d [ ", irqs_running());
+ for (i = 0; i < smp_num_cpus; i++)
+ printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
+ printk("]\nbh: %d [ ",
+ (spin_is_locked(&global_bh_lock) ? 1 : 0));
+ for (i = 0; i < smp_num_cpus; i++)
+ printk("%u ", local_bh_count(i));
+ printk("]\n");
+}
+
+#define MAXCOUNT 10000000
+
+/*
+ * Wait for all in-flight interrupt handlers: a global cli()/sti()
+ * pair cannot complete until no handlers are running.
+ */
+void synchronize_irq(void)
+{
+ if (irqs_running()) {
+ cli();
+ sti();
+ }
+}
+
+/*
+ * Acquire the global irq lock for @cpu (no-op if already held by us).
+ * Takes the brlock write side, then waits with interrupts briefly
+ * re-enabled until no irq handlers are running and the global bh lock
+ * is not held by anyone else; dumps diagnostics after MAXCOUNT spins.
+ */
+static inline void get_irqlock(int cpu)
+{
+ int count;
+
+ if ((unsigned char)cpu == global_irq_holder)
+ return;
+
+ count = MAXCOUNT;
+again:
+ br_write_lock(BR_GLOBALIRQ_LOCK);
+ for (;;) {
+ spinlock_t *lock;
+
+ if (!irqs_running() &&
+ (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
+ break;
+
+ /* Drop the write lock while busy-waiting so others progress. */
+ br_write_unlock(BR_GLOBALIRQ_LOCK);
+ lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
+ while (irqs_running() ||
+ spin_is_locked(lock) ||
+ (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
+ if (!--count) {
+ show("get_irqlock");
+ count = (~0 >> 1);
+ }
+ __sti();
+ barrier();
+ __cli();
+ }
+ goto again;
+ }
+
+ global_irq_holder = cpu;
+}
+
+/*
+ * A global "cli()" while in an interrupt context
+ * turns into just a local cli(). Interrupts
+ * should use spinlocks for the (very unlikely)
+ * case that they ever want to protect against
+ * each other.
+ *
+ * If we already have local interrupts disabled,
+ * this will not turn a local disable into a
+ * global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+void __global_cli(void)
+{
+ unsigned long flags;
+
+ __save_flags(flags);
+ /* Bit 15 of the saved MSR is the external-interrupt enable (EE). */
+ if (flags & (1UL << 15)) {
+ int cpu = smp_processor_id();
+ __cli();
+ if (!local_irq_count(cpu))
+ get_irqlock(cpu);
+ }
+}
+
+/*
+ * Undo __global_cli(): release the global irq lock if we hold it
+ * (and are not in interrupt context), then re-enable locally.
+ */
+void __global_sti(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!local_irq_count(cpu))
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+ int retval;
+ int local_enabled;
+ unsigned long flags;
+
+ __save_flags(flags);
+ /* MSR bit 15 = external-interrupt enable (EE). */
+ local_enabled = (flags >> 15) & 1;
+ /* default to local */
+ retval = 2 + local_enabled;
+
+ /* check for global flags if we're not in an interrupt */
+ if (!local_irq_count(smp_processor_id())) {
+ if (local_enabled)
+ retval = 1;
+ if (global_irq_holder == (unsigned char) smp_processor_id())
+ retval = 0;
+ }
+ return retval;
+}
+
+/*
+ * Restore an interrupt state saved by __global_save_flags(); values
+ * outside 0..3 indicate a caller bug and are logged with the return
+ * address for debugging.
+ */
+void __global_restore_flags(unsigned long flags)
+{
+ switch (flags) {
+ case 0:
+ __global_cli();
+ break;
+ case 1:
+ __global_sti();
+ break;
+ case 2:
+ __cli();
+ break;
+ case 3:
+ __sti();
+ break;
+ default:
+ printk("global_restore_flags: %016lx caller %p\n",
+ flags, __builtin_return_address(0));
+ }
+}
+
+#endif /* CONFIG_SMP */
+
+/* /proc/irq tree: root dir, per-irq dirs, and their smp_affinity files. */
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
+/* Default affinity mask: all CPUs or none, per config. */
+#ifdef CONFIG_IRQ_ALL_CPUS
+unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
+#else /* CONFIG_IRQ_ALL_CPUS */
+unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x00000000};
+#endif /* CONFIG_IRQ_ALL_CPUS */
+
+#define HEX_DIGITS 8
+
+/* read_proc for /proc/irq/N/smp_affinity: print the mask as 8 hex digits. */
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08x\n", irq_affinity[(int)(long)data]);
+}
+
+/*
+ * Parse up to HEX_DIGITS hex characters from a userspace buffer into
+ * *ret.  Parsing stops at the first non-hex character.  Returns 0 on
+ * success, -EINVAL for an empty buffer, -EFAULT on copy failure.
+ *
+ * NOTE(review): the return type is unsigned int but negative errno
+ * values are returned; callers store the result in a signed int, so
+ * it works, but the signature is misleading -- consider int.
+ */
+static unsigned int parse_hex_value (const char *buffer,
+ unsigned long count, unsigned long *ret)
+{
+ unsigned char hexnum [HEX_DIGITS];
+ unsigned long value;
+ int i;
+
+ if (!count)
+ return -EINVAL;
+ if (count > HEX_DIGITS)
+ count = HEX_DIGITS;
+ if (copy_from_user(hexnum, buffer, count))
+ return -EFAULT;
+
+ /*
+ * Parse the first 8 characters as a hex string, any non-hex char
+ * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
+ */
+ value = 0;
+
+ for (i = 0; i < count; i++) {
+ unsigned int c = hexnum[i];
+
+ switch (c) {
+ case '0' ... '9': c -= '0'; break;
+ case 'a' ... 'f': c -= 'a'-10; break;
+ case 'A' ... 'F': c -= 'A'-10; break;
+ default:
+ goto out;
+ }
+ value = (value << 4) | c;
+ }
+out:
+ *ret = value;
+ return 0;
+}
+
+/*
+ * write_proc for /proc/irq/N/smp_affinity: parse a hex cpu mask and
+ * hand it to the controller's set_affinity hook.  Returns -EIO when
+ * the controller cannot set affinity, or the parse error.
+ */
+static int irq_affinity_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ int irq = (int)(long) data, full_count = count, err;
+ unsigned long new_value;
+
+ if (!irq_desc[irq].handler->set_affinity)
+ return -EIO;
+
+ err = parse_hex_value(buffer, count, &new_value);
+ /*
+ * Bail out on a bad buffer.  The original ignored err and went on
+ * to use new_value, which parse_hex_value leaves uninitialized on
+ * -EINVAL/-EFAULT (prof_cpu_mask_write_proc below gets this right).
+ */
+ if (err)
+ return err;
+
+/* Why is this disabled ? --BenH */
+#if 0/*CONFIG_SMP*/
+ /*
+ * Do not allow disabling IRQs completely - it's a too easy
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!(new_value & cpu_online_map))
+ return -EINVAL;
+#endif
+
+ irq_affinity[irq] = new_value;
+ irq_desc[irq].handler->set_affinity(irq, new_value);
+
+ return full_count;
+}
+
+/* read_proc for /proc/irq/prof_cpu_mask: print the mask in hex. */
+static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ unsigned long *mask = (unsigned long *) data;
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08lx\n", *mask);
+}
+
+/*
+ * /proc/irq/prof_cpu_mask write handler: update the profiling CPU
+ * mask.  On iSeries, the per-CPU paca profiling enables are refreshed
+ * to match the new mask (bit i of the mask -> paca i).
+ * Returns the full write count on success or a negative errno.
+ */
+static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ unsigned long *mask = (unsigned long *) data, full_count = count;
+ unsigned long new_value;
+ int err;	/* was unsigned long: it holds negative errno values */
+
+ err = parse_hex_value(buffer, count, &new_value);
+ if (err)
+ return err;
+
+ *mask = new_value;
+
+#ifdef CONFIG_PPC_ISERIES
+ {
+ unsigned i;
+ for (i=0; i<maxPacas; ++i) {
+ if ( xPaca[i].prof_buffer && (new_value & 1) )
+ xPaca[i].prof_enabled = 1;
+ else
+ xPaca[i].prof_enabled = 0;
+ new_value >>= 1;
+ }
+ }
+#endif
+
+ return full_count;
+}
+
+#define MAX_NAMELEN 10
+
+/*
+ * Create /proc/irq/<irq>/ and its smp_affinity entry for one IRQ.
+ * Silently does nothing if /proc/irq does not exist, the IRQ has no
+ * handler, or a procfs allocation fails.
+ */
+static void register_irq_proc (unsigned int irq)
+{
+ struct proc_dir_entry *entry;
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == NULL))
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+ if (irq_dir[irq] == NULL)
+ return;
+
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+ if (entry == NULL)
+ return;
+
+ entry->nlink = 1;
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+
+ smp_affinity_entry[irq] = entry;
+}
+
+unsigned long prof_cpu_mask = -1;
+
+/*
+ * Populate the /proc/irq hierarchy: the prof_cpu_mask entry plus one
+ * subdirectory per IRQ that already has a handler installed.
+ */
+void init_irq_proc (void)
+{
+ struct proc_dir_entry *entry;
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", 0);
+
+ /* create /proc/irq/prof_cpu_mask */
+ entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
+
+ /* NOTE(review): 'entry' is dereferenced without a NULL check; a
+ * failed create_proc_entry() at boot would oops here - confirm. */
+ entry->nlink = 1;
+ entry->data = (void *)&prof_cpu_mask;
+ entry->read_proc = prof_cpu_mask_read_proc;
+ entry->write_proc = prof_cpu_mask_write_proc;
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for (i = 0; i < NR_IRQS; i++) {
+ if (irq_desc[i].handler == NULL)
+ continue;
+ register_irq_proc(i);
+ }
+}
+
+/* Do-nothing interrupt handler, usable wherever an irqaction needs a
+ * valid handler that ignores the interrupt. */
+void no_action(int irq, void *dev, struct pt_regs *regs)
+{
+}
--- /dev/null
+/*
+ *
+ * Procedures for interfacing to Open Firmware.
+ *
+ * Peter Bergner, IBM Corp. June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
+#include <asm/bitops.h>
+#include <asm/udbg.h>
+
+extern unsigned long klimit;
+extern unsigned long reloc_offset(void);
+
+
+/* Forward declaration: shared insert/coalesce helper for both region lists. */
+static long lmb_add_region(struct lmb_region *, unsigned long, unsigned long, unsigned long);
+
+/* The single global LMB descriptor (memory + reserved region lists),
+ * zero-initialized here and seeded with dummy entries by lmb_init(). */
+struct lmb lmb = {
+ 0,
+ {0,0,0,{{0,0,0}}},
+ {0,0,0,{{0,0,0}}}
+};
+
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+/* Merge region r2 into r1 (sizes added) and close the gap by sliding
+ * every later table entry down one slot. */
+static void
+lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
+{
+ unsigned long i;
+
+ rgn->region[r1].size += rgn->region[r2].size;
+ /* Shift all entries after r2 down by one to fill the hole. */
+ for (i=r2; i < rgn->cnt-1 ;i++) {
+ rgn->region[i].base = rgn->region[i+1].base;
+ rgn->region[i].physbase = rgn->region[i+1].physbase;
+ rgn->region[i].size = rgn->region[i+1].size;
+ rgn->region[i].type = rgn->region[i+1].type;
+ }
+ rgn->cnt--;
+}
+
+
+/* This routine called with relocation disabled. */
+/* Seed both region lists with a single zero-sized entry so that
+ * lmb_add_region() never has to handle an empty table; the dummies
+ * are coalesced away by later additions.  'offset' feeds PTRRELOC
+ * while running unrelocated. */
+void
+lmb_init(void)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+
+ /* Create a dummy zero size LMB which will get coalesced away later.
+ * This simplifies the lmb_add() code below...
+ */
+ _lmb->memory.region[0].base = 0;
+ _lmb->memory.region[0].size = 0;
+ _lmb->memory.region[0].type = LMB_MEMORY_AREA;
+ _lmb->memory.cnt = 1;
+
+ /* Ditto. */
+ _lmb->reserved.region[0].base = 0;
+ _lmb->reserved.region[0].size = 0;
+ _lmb->reserved.region[0].type = LMB_MEMORY_AREA;
+ _lmb->reserved.cnt = 1;
+}
+
+/* This is only used here, it doesnt deserve to be in bitops.h */
+/* Count trailing zero bits of 'mask' (returns 64 for mask == 0):
+ * (mask-1) & ~mask leaves exactly the bits below the lowest set bit,
+ * then 64 - cntlzd of that value is the trailing-zero count. */
+static __inline__ long cnt_trailing_zeros(unsigned long mask)
+{
+ long cnt;
+
+ asm(
+" addi %0,%1,-1 \n\
+ andc %0,%0,%1 \n\
+ cntlzd %0,%0 \n\
+ subfic %0,%0,64"
+ : "=r" (cnt)
+ : "r" (mask));
+ return cnt;
+}
+
+/* This routine called with relocation disabled. */
+/* Walk the memory region list computing total size and the largest
+ * common power-of-two divisor (lcd_size) of all region sizes.  With
+ * CONFIG_MSCHUNKS the physbase of each region is assigned so that
+ * physical addresses are packed contiguously; otherwise physbase
+ * simply mirrors base. */
+void
+lmb_analyze(void)
+{
+ unsigned long i, physbase = 0;
+ unsigned long total_size = 0;
+ unsigned long size_mask = 0;
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+
+ for (i=0; i < _lmb->memory.cnt ;i++) {
+ unsigned long lmb_size = _lmb->memory.region[i].size;
+#ifdef CONFIG_MSCHUNKS
+ _lmb->memory.region[i].physbase = physbase;
+ physbase += lmb_size;
+#else
+ _lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
+#endif
+ total_size += lmb_size;
+ size_mask |= lmb_size;
+ }
+ _lmb->memory.size = total_size;
+ /* OR of all sizes: its lowest set bit is the common alignment. */
+ _lmb->memory.lcd_size = (1UL << cnt_trailing_zeros(size_mask));
+}
+
+/* This routine called with relocation disabled. */
+/* Record a [base, base+size) range in the memory region list.
+ * 'offset' is consumed by the PTRRELOC macro while unrelocated. */
+long
+lmb_add(unsigned long base, unsigned long size)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_rgn = &(_lmb->memory);
+
+ return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
+
+}
+
+/* This routine called with relocation disabled. */
+/* Record an I/O range; it goes into the same 'memory' list as RAM but
+ * tagged LMB_IO_AREA, so allocators can skip it. */
+long
+lmb_add_io(unsigned long base, unsigned long size)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_rgn = &(_lmb->memory);
+
+ return lmb_add_region(_rgn, base, size, LMB_IO_AREA);
+
+}
+
+/* Mark [base, base+size) as reserved, i.e. off-limits to lmb_alloc(). */
+long
+lmb_reserve(unsigned long base, unsigned long size)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_rgn = &(_lmb->reserved);
+
+ return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
+}
+
+/* This routine called with relocation disabled. */
+/* Insert a new [base, base+size) area of the given type into 'rgn',
+ * first trying to coalesce it with an adjacent region of the same
+ * type, otherwise inserting it into the table sorted by base address.
+ * Returns >0 if coalesced, 0 if inserted, -1 if the table is full.
+ */
+static long
+lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size,
+ unsigned long type)
+{
+ unsigned long i, coalesced = 0;
+ long adjacent, j;
+
+ /* First try and coalesce this LMB with another. */
+ for (i=0; i < rgn->cnt ;i++) {
+ unsigned long rgnbase = rgn->region[i].base;
+ unsigned long rgnsize = rgn->region[i].size;
+ unsigned long rgntype = rgn->region[i].type;
+
+ if ( rgntype != type )
+ continue;
+
+ adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
+ if ( adjacent > 0 ) {
+ /* New area ends where region i begins: grow it downward. */
+ rgn->region[i].base -= size;
+ rgn->region[i].physbase -= size;
+ rgn->region[i].size += size;
+ coalesced++;
+ break;
+ }
+ else if ( adjacent < 0 ) {
+ /* New area starts where region i ends: grow it upward. */
+ rgn->region[i].size += size;
+ coalesced++;
+ break;
+ }
+ }
+
+ /* The grown region may now abut its successor: merge them too. */
+ if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
+ lmb_coalesce_regions(rgn, i, i+1);
+ coalesced++;
+ }
+
+ if ( coalesced ) {
+ return coalesced;
+ } else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
+ return -1;
+ }
+
+ /* Couldn't coalesce the LMB, so add it to the sorted table.
+ * The index must be signed: the old "unsigned long i >= 0" test
+ * was always true and underflowed whenever the new area sorted
+ * below every existing region. */
+ for (j=rgn->cnt-1; j >= 0 ;j--) {
+ if (base < rgn->region[j].base) {
+ /* Shift entry j up one slot to make room. */
+ rgn->region[j+1].base = rgn->region[j].base;
+ rgn->region[j+1].physbase = rgn->region[j].physbase;
+ rgn->region[j+1].size = rgn->region[j].size;
+ rgn->region[j+1].type = rgn->region[j].type;
+ } else {
+ rgn->region[j+1].base = base;
+ rgn->region[j+1].physbase = lmb_abs_to_phys(base);
+ rgn->region[j+1].size = size;
+ rgn->region[j+1].type = type;
+ break;
+ }
+ }
+ if (j < 0) {
+ /* New area is below every existing region: slot it in first. */
+ rgn->region[0].base = base;
+ rgn->region[0].physbase = lmb_abs_to_phys(base);
+ rgn->region[0].size = size;
+ rgn->region[0].type = type;
+ }
+ rgn->cnt++;
+
+ return 0;
+}
+
+/* Return the index of the first region in 'rgn' that overlaps
+ * [base, base+size), or -1 if none does. */
+long
+lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
+{
+ unsigned long i;
+
+ for (i=0; i < rgn->cnt ;i++) {
+ unsigned long rgnbase = rgn->region[i].base;
+ unsigned long rgnsize = rgn->region[i].size;
+ if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+ break;
+ }
+ }
+
+ return (i < rgn->cnt) ? i : -1;
+}
+
+
+/* Allocate 'size' bytes aligned to 'align' from the highest-addressed
+ * memory region that can satisfy the request, avoiding every range in
+ * the reserved list.  The winning range is itself added to the
+ * reserved list.  Returns the absolute base address, or 0 on failure.
+ */
+unsigned long
+lmb_alloc(unsigned long size, unsigned long align)
+{
+ long i, j;
+ unsigned long base;
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_mem = &(_lmb->memory);
+ struct lmb_region *_rsv = &(_lmb->reserved);
+
+ /* Walk memory regions top-down, preferring high addresses. */
+ for (i=_mem->cnt-1; i >= 0 ;i--) {
+ unsigned long lmbbase = _mem->region[i].base;
+ unsigned long lmbsize = _mem->region[i].size;
+ unsigned long lmbtype = _mem->region[i].type;
+
+ /* Never allocate out of I/O areas. */
+ if ( lmbtype != LMB_MEMORY_AREA )
+ continue;
+
+ /* Start at the aligned top of the region, then step below each
+ * reserved range we collide with until we fit or fall out. */
+ base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
+
+ while ( (lmbbase <= base) &&
+ ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
+ base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
+ }
+
+ if ( (base != 0) && (lmbbase <= base) )
+ break;
+ }
+
+ if ( i < 0 )
+ return 0;
+
+ lmb_add_region(_rsv, base, size, LMB_MEMORY_AREA);
+
+ return base;
+}
+
+/* Total physical memory: end of the last region's physbase range.
+ * NOTE(review): assumes regions are sorted and physbase values are
+ * packed contiguously (as lmb_analyze() arranges) - confirm for the
+ * non-MSCHUNKS case. */
+unsigned long
+lmb_phys_mem_size(void)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_mem = &(_lmb->memory);
+ unsigned long idx = _mem->cnt-1;
+ unsigned long lastbase = _mem->region[idx].physbase;
+ unsigned long lastsize = _mem->region[idx].size;
+
+ return (lastbase + lastsize);
+}
+
+/* Address just past the last memory region: in physical terms under
+ * CONFIG_MSCHUNKS, in absolute terms otherwise. */
+unsigned long
+lmb_end_of_DRAM(void)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_mem = &(_lmb->memory);
+ unsigned long idx = _mem->cnt-1;
+#ifdef CONFIG_MSCHUNKS
+ unsigned long lastbase = _mem->region[idx].physbase;
+#else
+ unsigned long lastbase = _mem->region[idx].base;
+#endif /* CONFIG_MSCHUNKS */
+ unsigned long lastsize = _mem->region[idx].size;
+
+ return (lastbase + lastsize);
+}
+
+
+/* Translate an absolute address 'aa' to its physical address by
+ * locating the containing memory region and applying its physbase
+ * offset.  Returns 0 if no region contains the address. */
+unsigned long
+lmb_abs_to_phys(unsigned long aa)
+{
+ unsigned long i, pa = 0;
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct lmb_region *_mem = &(_lmb->memory);
+
+ for (i=0; i < _mem->cnt ;i++) {
+ unsigned long lmbbase = _mem->region[i].base;
+ unsigned long lmbsize = _mem->region[i].size;
+ /* size 1: test whether the single address falls in the region */
+ if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
+ pa = _mem->region[i].physbase + (aa - lmbbase);
+ break;
+ }
+ }
+
+ return pa;
+}
+
+/* Dump the full LMB state to the debug console, labelled with 'str'.
+ * NOTE(review): the counters printed with %d appear to be unsigned
+ * long (the iteration code compares them against unsigned long) -
+ * %ld/%lu would match; confirm against the struct definitions. */
+void
+lmb_dump(char *str)
+{
+ unsigned long i;
+
+ udbg_printf("\nlmb_dump: %s\n", str);
+ udbg_printf(" debug = %s\n",
+ (lmb.debug) ? "TRUE" : "FALSE");
+ udbg_printf(" memory.cnt = %d\n",
+ lmb.memory.cnt);
+ udbg_printf(" memory.size = 0x%lx\n",
+ lmb.memory.size);
+ udbg_printf(" memory.lcd_size = 0x%lx\n",
+ lmb.memory.lcd_size);
+ for (i=0; i < lmb.memory.cnt ;i++) {
+ udbg_printf(" memory.region[%d].base = 0x%lx\n",
+ i, lmb.memory.region[i].base);
+ udbg_printf(" .physbase = 0x%lx\n",
+ lmb.memory.region[i].physbase);
+ udbg_printf(" .size = 0x%lx\n",
+ lmb.memory.region[i].size);
+ udbg_printf(" .type = 0x%lx\n",
+ lmb.memory.region[i].type);
+ }
+
+ udbg_printf("\n");
+ udbg_printf(" reserved.cnt = %d\n",
+ lmb.reserved.cnt);
+ udbg_printf(" reserved.size = 0x%lx\n",
+ lmb.reserved.size);
+ udbg_printf(" reserved.lcd_size = 0x%lx\n",
+ lmb.reserved.lcd_size);
+ for (i=0; i < lmb.reserved.cnt ;i++) {
+ udbg_printf(" reserved.region[%d].base = 0x%lx\n",
+ i, lmb.reserved.region[i].base);
+ udbg_printf(" .physbase = 0x%lx\n",
+ lmb.reserved.region[i].physbase);
+ udbg_printf(" .size = 0x%lx\n",
+ lmb.reserved.region[i].size);
+ udbg_printf(" .type = 0x%lx\n",
+ lmb.reserved.region[i].type);
+ }
+}
--- /dev/null
+/*
+ * c 2001 PowerPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/* Private header for the ppc64 interrupt code: shared prototypes and
+ * externs used by the kernel/ irq files, not for general inclusion. */
+#ifndef _PPC_KERNEL_LOCAL_IRQ_H
+#define _PPC_KERNEL_LOCAL_IRQ_H
+
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+
+/* Dispatch a decoded interrupt number to its registered handler. */
+void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
+
+/* Number of 64-bit words needed for an NR_IRQS-wide bitmask. */
+#define NR_MASK_WORDS ((NR_IRQS + 63) / 64)
+
+extern int ppc_spurious_interrupts;
+extern int ppc_second_irq;
+extern struct irqaction *ppc_irq_action[NR_IRQS];
+
+#endif /* _PPC_KERNEL_LOCAL_IRQ_H */
--- /dev/null
+/*
+ * mf.c
+ * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
+ *
+ * This modules exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object. The VSP has final authority over powering on/off
+ * all partitions in the iSeries. It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/iSeries/mf.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/iSeries/HvLpConfig.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <asm/nvram.h>
+#include <asm/time.h>
+#include <asm/iSeries/ItSpCommArea.h>
+#include <asm/iSeries/mf_proc.h>
+#include <asm/iSeries/iSeries_proc.h>
+#include <asm/uaccess.h>
+#include <linux/pci.h>
+
+extern struct pci_dev * iSeries_vio_dev;
+
+/*
+ * This is the structure layout for the Machine Facilites LPAR event
+ * flows.
+ */
+struct VspCmdData;
+struct CeMsgData;
+/* Pun a pointer through a u64 so it can ride in an event's 64-bit
+ * correlation-token field. */
+union SafeCast
+{
+ u64 ptrAsU64;
+ void *ptr;
+};
+
+
+/* Completion callback invoked when a CE-message response arrives. */
+typedef void (*CeMsgCompleteHandler)( void *token, struct CeMsgData *vspCmdRsp );
+
+/* Handler + caller token carried along with an outstanding CE message. */
+struct CeMsgCompleteData
+{
+ CeMsgCompleteHandler xHdlr;
+ void *xToken;
+};
+
+/* Rendezvous block for a synchronous VSP command: the ack path copies
+ * the response into xResponse and ups xSemaphore. */
+struct VspRspData
+{
+ struct semaphore *xSemaphore;
+ struct VspCmdData *xResponse;
+};
+
+/* Wire layout of one machine-facilities LP event, wrapping the
+ * generic HvLpEvent header with the MF-specific payload union. */
+struct IoMFLpEvent
+{
+ struct HvLpEvent xHvLpEvent;
+
+ u16 xSubtypeRc;
+ u16 xRsvd1;
+ u32 xRsvd2;
+
+ union
+ {
+
+ /* Subtypes 4/5: (de)allocate LP events for a target partition. */
+ struct AllocData
+ {
+ u16 xSize;
+ u16 xType;
+ u32 xCount;
+ u16 xRsvd3;
+ u8 xRsvd4;
+ HvLpIndex xTargetLp;
+ } xAllocData;
+
+ /* Subtype 0: 12-byte CE control message plus completion token. */
+ struct CeMsgData
+ {
+ u8 xCEMsg[12];
+ char xReserved[4];
+ struct CeMsgCompleteData *xToken;
+ } xCEMsgData;
+
+ /* Subtype 6: VSP command with per-command argument sub-union. */
+ struct VspCmdData
+ {
+ union SafeCast xTokenUnion;
+ u16 xCmd;
+ HvLpIndex xLpIndex;
+ u8 xRc;
+ u32 xReserved1;
+
+ union VspCmdSubData
+ {
+ struct
+ {
+ u64 xState;
+ } xGetStateOut;
+
+ struct
+ {
+ u64 xIplType;
+ } xGetIplTypeOut, xFunction02SelectIplTypeIn;
+
+ struct
+ {
+ u64 xIplMode;
+ } xGetIplModeOut, xFunction02SelectIplModeIn;
+
+ struct
+ {
+ u64 xPage[4];
+ } xGetSrcHistoryIn;
+
+ struct
+ {
+ u64 xFlag;
+ } xGetAutoIplWhenPrimaryIplsOut,
+ xSetAutoIplWhenPrimaryIplsIn,
+ xWhiteButtonPowerOffIn,
+ xFunction08FastPowerOffIn,
+ xIsSpcnRackPowerIncompleteOut;
+
+ struct
+ {
+ u64 xToken;
+ u64 xAddressType;
+ u64 xSide;
+ u32 xTransferLength;
+ u32 xOffset;
+ } xSetKernelImageIn,
+ xGetKernelImageIn,
+ xSetKernelCmdLineIn,
+ xGetKernelCmdLineIn;
+
+ struct
+ {
+ u32 xTransferLength;
+ } xGetKernelImageOut,xGetKernelCmdLineOut;
+
+
+ u8 xReserved2[80];
+
+ } xSubData;
+ } xVspCmd;
+ } xUnion;
+};
+
+
+/*
+ * All outgoing event traffic is kept on a FIFO queue. The first
+ * pointer points to the one that is outstanding, and all new
+ * requests get stuck on the end. Also, we keep a certain number of
+ * preallocated stack elements so that we can operate very early in
+ * the boot up sequence (before kmalloc is ready).
+ */
+struct StackElement
+{
+ struct StackElement * next;
+ struct IoMFLpEvent event;
+ MFCompleteHandler hdlr;
+ char dmaData[72];
+ unsigned dmaDataLength;
+ unsigned remoteAddress;
+};
+/* Queue state: 'spinlock' guards head/tail/avail; 'prealloc' supplies
+ * elements before kmalloc is available. */
+static spinlock_t spinlock;
+static struct StackElement * head = NULL;
+static struct StackElement * tail = NULL;
+static struct StackElement * avail = NULL;
+static struct StackElement prealloc[16];
+
+/*
+ * Put a stack element onto the available queue, so it can get reused.
+ * Attention! You must have the spinlock before calling!
+ */
+/* NOTE(review): non-static function named 'free' pollutes the global
+ * kernel namespace - consider renaming or making it static. */
+void free( struct StackElement * element )
+{
+ if ( element != NULL )
+ {
+ element->next = avail;
+ avail = element;
+ }
+}
+
+/*
+ * Enqueue the outbound event onto the stack. If the queue was
+ * empty to begin with, we must also issue it via the Hypervisor
+ * interface. There is a section of code below that will touch
+ * the first stack pointer without the protection of the spinlock.
+ * This is OK, because we know that nobody else will be modifying
+ * the first pointer when we do this.
+ */
+/* Pass newElement == NULL to (re)kick transmission of whatever is at
+ * the head of the queue (used by the ack path).  Returns 0 on success
+ * or -EIO if the hypervisor rejected the caller's own element. */
+static int signalEvent( struct StackElement * newElement )
+{
+ int rc = 0;
+ unsigned long flags;
+ int go = 1;
+ struct StackElement * element;
+ HvLpEvent_Rc hvRc;
+
+ /* enqueue the event */
+ if ( newElement != NULL )
+ {
+ spin_lock_irqsave( &spinlock, flags );
+ if ( head == NULL )
+ head = newElement;
+ else {
+ /* Something is already in flight; just append and wait. */
+ go = 0;
+ tail->next = newElement;
+ }
+ newElement->next = NULL;
+ tail = newElement;
+ spin_unlock_irqrestore( &spinlock, flags );
+ }
+
+ /* send the event; on failure drop it and try the next in line */
+ while ( go )
+ {
+ go = 0;
+
+ /* any DMA data to send beforehand? */
+ if ( head->dmaDataLength > 0 )
+ HvCallEvent_dmaToSp( head->dmaData, head->remoteAddress, head->dmaDataLength, HvLpDma_Direction_LocalToRemote );
+
+ hvRc = HvCallEvent_signalLpEvent(&head->event.xHvLpEvent);
+ if ( hvRc != HvLpEvent_Rc_Good )
+ {
+ printk( KERN_ERR "mf.c: HvCallEvent_signalLpEvent() failed with %d\n", (int)hvRc );
+
+ spin_lock_irqsave( &spinlock, flags );
+ element = head;
+ head = head->next;
+ if ( head != NULL )
+ go = 1;
+ spin_unlock_irqrestore( &spinlock, flags );
+
+ /* Our own element failed: report -EIO to the caller; for
+ * older queued elements, invoke their completion handler. */
+ if ( element == newElement )
+ rc = -EIO;
+ else {
+ if ( element->hdlr != NULL )
+ {
+ union SafeCast mySafeCast;
+ mySafeCast.ptrAsU64 = element->event.xHvLpEvent.xCorrelationToken;
+ (*element->hdlr)( mySafeCast.ptr, -EIO );
+ }
+ }
+
+ spin_lock_irqsave( &spinlock, flags );
+ free( element );
+ spin_unlock_irqrestore( &spinlock, flags );
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate a new StackElement structure, and initialize it.
+ */
+/* Tries the preallocated free list first (works before kmalloc is
+ * up), then falls back to kmalloc(GFP_ATOMIC).  Returns NULL on
+ * failure.  The HvLpEvent header is pre-filled for an int-style,
+ * acked machine-facilities event aimed at the primary partition. */
+static struct StackElement * newStackElement( void )
+{
+ struct StackElement * newElement = NULL;
+ HvLpIndex primaryLp = HvLpConfig_getPrimaryLpIndex();
+ unsigned long flags;
+
+ /* NOTE(review): this first test is always true (newElement was just
+ * initialized to NULL); harmless but redundant. */
+ if ( newElement == NULL )
+ {
+ spin_lock_irqsave( &spinlock, flags );
+ if ( avail != NULL )
+ {
+ newElement = avail;
+ avail = avail->next;
+ }
+ spin_unlock_irqrestore( &spinlock, flags );
+ }
+
+ if ( newElement == NULL )
+ newElement = kmalloc(sizeof(struct StackElement),GFP_ATOMIC);
+
+ if ( newElement == NULL )
+ {
+ printk( KERN_ERR "mf.c: unable to kmalloc %ld bytes\n", sizeof(struct StackElement) );
+ return NULL;
+ }
+
+ memset( newElement, 0, sizeof(struct StackElement) );
+ newElement->event.xHvLpEvent.xFlags.xValid = 1;
+ newElement->event.xHvLpEvent.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
+ newElement->event.xHvLpEvent.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
+ newElement->event.xHvLpEvent.xFlags.xFunction = HvLpEvent_Function_Int;
+ newElement->event.xHvLpEvent.xType = HvLpEvent_Type_MachineFac;
+ newElement->event.xHvLpEvent.xSourceLp = HvLpConfig_getLpIndex();
+ newElement->event.xHvLpEvent.xTargetLp = primaryLp;
+ newElement->event.xHvLpEvent.xSizeMinus1 = sizeof(newElement->event)-1;
+ newElement->event.xHvLpEvent.xRc = HvLpEvent_Rc_Good;
+ newElement->event.xHvLpEvent.xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primaryLp,HvLpEvent_Type_MachineFac);
+ newElement->event.xHvLpEvent.xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primaryLp,HvLpEvent_Type_MachineFac);
+
+ return newElement;
+}
+
+/* Send a VSP command (subtype 6) and block until the primary
+ * partition's ack arrives; the ack path copies the response back into
+ * *vspCmd and ups the on-stack semaphore.  Returns 0 on success or a
+ * negative errno.  Must be called from a context that may sleep. */
+static int signalVspInstruction( struct VspCmdData *vspCmd )
+{
+ struct StackElement * newElement = newStackElement();
+ int rc = 0;
+ struct VspRspData response;
+ DECLARE_MUTEX_LOCKED(Semaphore);
+ response.xSemaphore = &Semaphore;
+ response.xResponse = vspCmd;
+
+ if ( newElement == NULL )
+ rc = -ENOMEM;
+ else {
+ newElement->event.xHvLpEvent.xSubtype = 6;
+ /* "MFVI" subtype tag */
+ newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('V'<<8)+('I'<<0);
+ newElement->event.xUnion.xVspCmd.xTokenUnion.ptr = &response;
+ newElement->event.xUnion.xVspCmd.xCmd = vspCmd->xCmd;
+ newElement->event.xUnion.xVspCmd.xLpIndex = HvLpConfig_getLpIndex();
+ newElement->event.xUnion.xVspCmd.xRc = 0xFF;
+ newElement->event.xUnion.xVspCmd.xReserved1 = 0;
+ memcpy(&(newElement->event.xUnion.xVspCmd.xSubData),&(vspCmd->xSubData), sizeof(vspCmd->xSubData));
+ /* make sure the payload is visible before signalling */
+ mb();
+
+ rc = signalEvent(newElement);
+ }
+
+ if (rc == 0)
+ {
+ /* wait for ackReceived() to post the response */
+ down(&Semaphore);
+ }
+
+ return rc;
+}
+
+
+/*
+ * Send a 12-byte CE message to the primary partition VSP object
+ */
+/* 'token' (may be NULL) carries the completion handler/context that
+ * the ack path will invoke.  Returns 0 or a negative errno. */
+static int signalCEMsg( char * ceMsg, void * token )
+{
+ struct StackElement * newElement = newStackElement();
+ int rc = 0;
+
+ if ( newElement == NULL )
+ rc = -ENOMEM;
+ else {
+ newElement->event.xHvLpEvent.xSubtype = 0;
+ /* "MFCE" subtype tag */
+ newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('C'<<8)+('E'<<0);
+ memcpy( newElement->event.xUnion.xCEMsgData.xCEMsg, ceMsg, 12 );
+ newElement->event.xUnion.xCEMsgData.xToken = token;
+ rc = signalEvent(newElement);
+ }
+
+ return rc;
+}
+
+/*
+ * Send a 12-byte CE message and DMA data to the primary partition VSP object
+ */
+/* The DMA payload is staged in the element; signalEvent() performs
+ * the actual DMA to 'remoteAddress' just before signalling.  The
+ * caller must keep dmaDataLength <= sizeof(element->dmaData) (72). */
+static int dmaAndSignalCEMsg( char * ceMsg, void * token, void * dmaData, unsigned dmaDataLength, unsigned remoteAddress )
+{
+ struct StackElement * newElement = newStackElement();
+ int rc = 0;
+
+ if ( newElement == NULL )
+ rc = -ENOMEM;
+ else {
+ newElement->event.xHvLpEvent.xSubtype = 0;
+ newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('C'<<8)+('E'<<0);
+ memcpy( newElement->event.xUnion.xCEMsgData.xCEMsg, ceMsg, 12 );
+ newElement->event.xUnion.xCEMsgData.xToken = token;
+ memcpy( newElement->dmaData, dmaData, dmaDataLength );
+ newElement->dmaDataLength = dmaDataLength;
+ newElement->remoteAddress = remoteAddress;
+ rc = signalEvent(newElement);
+ }
+
+ return rc;
+}
+
+/*
+ * Initiate a nice (hopefully) shutdown of Linux. We simply are
+ * going to try and send the init process a SIGINT signal. If
+ * this fails (why?), we'll simply force it off in a not-so-nice
+ * manner.
+ */
+/* Returns the kill_proc() result: 0 if init was signalled (graceful
+ * path); on failure mf_powerOff() is invoked and never returns. */
+static int shutdown( void )
+{
+ int rc = kill_proc(1,SIGINT,1);
+
+ if ( rc )
+ {
+ printk( KERN_ALERT "mf.c: SIGINT to init failed (%d), hard shutdown commencing\n", rc );
+ mf_powerOff();
+ }
+ else
+ printk( KERN_ALERT "mf.c: init has been successfully notified to proceed with shutdown\n" );
+
+ return rc;
+}
+
+/*
+ * The primary partition VSP object is sending us a new
+ * event flow. Handle it...
+ */
+/* Subtype 0 carries CE messages (power-control notification 0x5B,
+ * get-time response 0xC0); subtype 1 is a system-wide shutdown
+ * request.  A matched get-time response also completes and dequeues
+ * the pending request at the head of the send queue. */
+static void intReceived( struct IoMFLpEvent * event )
+{
+ int freeIt = 0;
+ struct StackElement * two = NULL;
+ /* ack the interrupt */
+ event->xHvLpEvent.xRc = HvLpEvent_Rc_Good;
+ HvCallEvent_ackLpEvent( &event->xHvLpEvent );
+
+ /* process interrupt */
+ switch( event->xHvLpEvent.xSubtype )
+ {
+ case 0: /* CE message */
+ switch( event->xUnion.xCEMsgData.xCEMsg[3] )
+ {
+ case 0x5B: /* power control notification */
+ if ( (event->xUnion.xCEMsgData.xCEMsg[5]&0x20) != 0 )
+ {
+ printk( KERN_ALERT "mf.c: Commencing partition shutdown\n" );
+ if ( shutdown() == 0 )
+ signalCEMsg( "\x00\x00\x00\xDB\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
+ }
+ break;
+ case 0xC0: /* get time */
+ {
+ /* only complete if the head request was a 0x40 get-time */
+ if ( (head != NULL) && ( head->event.xUnion.xCEMsgData.xCEMsg[3] == 0x40 ) )
+ {
+ freeIt = 1;
+ if ( head->event.xUnion.xCEMsgData.xToken != 0 )
+ {
+ CeMsgCompleteHandler xHdlr = head->event.xUnion.xCEMsgData.xToken->xHdlr;
+ void * token = head->event.xUnion.xCEMsgData.xToken->xToken;
+
+ if (xHdlr != NULL)
+ (*xHdlr)( token, &(event->xUnion.xCEMsgData) );
+ }
+ }
+ }
+ break;
+ }
+
+ /* remove from queue */
+ if ( freeIt == 1 )
+ {
+ unsigned long flags;
+ spin_lock_irqsave( &spinlock, flags );
+ if ( head != NULL )
+ {
+ struct StackElement *oldHead = head;
+ head = head->next;
+ two = head;
+ free( oldHead );
+ }
+ spin_unlock_irqrestore( &spinlock, flags );
+ }
+
+ /* send next waiting event */
+ if ( two != NULL )
+ signalEvent( NULL );
+ break;
+ case 1: /* IT sys shutdown */
+ printk( KERN_ALERT "mf.c: Commencing system shutdown\n" );
+ shutdown();
+ break;
+ }
+}
+
+/*
+ * The primary partition VSP object is acknowledging the receipt
+ * of a flow we sent to them. If there are other flows queued
+ * up, we must send another one now...
+ */
+/* Completes the request at the head of the queue according to the
+ * ack's subtype (0 = CE msg, 4/5 = alloc/dealloc, 6 = VSP command),
+ * invoking completion handlers or posting the VspRspData semaphore,
+ * then dequeues it and kicks the next pending event. */
+static void ackReceived( struct IoMFLpEvent * event )
+{
+ unsigned long flags;
+ struct StackElement * two = NULL;
+ unsigned long freeIt = 0;
+
+ /* handle current event */
+ if ( head != NULL )
+ {
+ switch( event->xHvLpEvent.xSubtype )
+ {
+ case 0: /* CE msg */
+ if ( event->xUnion.xCEMsgData.xCEMsg[3] == 0x40 )
+ {
+ /* get-time: only done when byte 2 is non-zero */
+ if ( event->xUnion.xCEMsgData.xCEMsg[2] != 0 )
+ {
+ freeIt = 1;
+ if ( head->event.xUnion.xCEMsgData.xToken != 0 )
+ {
+ CeMsgCompleteHandler xHdlr = head->event.xUnion.xCEMsgData.xToken->xHdlr;
+ void * token = head->event.xUnion.xCEMsgData.xToken->xToken;
+
+ if (xHdlr != NULL)
+ (*xHdlr)( token, &(event->xUnion.xCEMsgData) );
+ }
+ }
+ } else {
+ freeIt = 1;
+ }
+ break;
+ case 4: /* allocate */
+ case 5: /* deallocate */
+ if ( head->hdlr != NULL )
+ {
+ union SafeCast mySafeCast;
+ mySafeCast.ptrAsU64 = event->xHvLpEvent.xCorrelationToken;
+ (*head->hdlr)( mySafeCast.ptr, event->xUnion.xAllocData.xCount );
+ }
+ freeIt = 1;
+ break;
+ case 6:
+ {
+ /* synchronous VSP command: copy response, wake the waiter */
+ struct VspRspData *rsp = (struct VspRspData *)event->xUnion.xVspCmd.xTokenUnion.ptr;
+
+ if (rsp != NULL)
+ {
+ if (rsp->xResponse != NULL)
+ memcpy(rsp->xResponse, &(event->xUnion.xVspCmd), sizeof(event->xUnion.xVspCmd));
+ if (rsp->xSemaphore != NULL)
+ up(rsp->xSemaphore);
+ } else {
+ printk( KERN_ERR "mf.c: no rsp\n");
+ }
+ freeIt = 1;
+ }
+ break;
+ }
+ }
+ else
+ printk( KERN_ERR "mf.c: stack empty for receiving ack\n" );
+
+ /* remove from queue */
+ spin_lock_irqsave( &spinlock, flags );
+ if (( head != NULL ) && ( freeIt == 1 ))
+ {
+ struct StackElement *oldHead = head;
+ head = head->next;
+ two = head;
+ free( oldHead );
+ }
+ spin_unlock_irqrestore( &spinlock, flags );
+
+ /* send next waiting event */
+ if ( two != NULL )
+ signalEvent( NULL );
+}
+
+/*
+ * This is the generic event handler we are registering with
+ * the Hypervisor. Ensure the flows are for us, and then
+ * parse it enough to know if it is an interrupt or an
+ * acknowledge.
+ */
+static void hvHandler( struct HvLpEvent * event, struct pt_regs * regs )
+{
+ if ( (event != NULL) && (event->xType == HvLpEvent_Type_MachineFac) )
+ {
+ switch( event->xFlags.xFunction )
+ {
+ case HvLpEvent_Function_Ack:
+ ackReceived( (struct IoMFLpEvent *)event );
+ break;
+ case HvLpEvent_Function_Int:
+ intReceived( (struct IoMFLpEvent *)event );
+ break;
+ default:
+ printk( KERN_ERR "mf.c: non ack/int event received\n" );
+ break;
+ }
+ }
+ else
+ printk( KERN_ERR "mf.c: alien event received\n" );
+}
+
+/*
+ * Global kernel interface to allocate and seed events into the
+ * Hypervisor.
+ */
+/* Asynchronous: 'hdlr' (if non-NULL) is called with 'userToken' when
+ * the request completes - immediately on a local failure, or from the
+ * ack path with the allocated count on success. */
+void mf_allocateLpEvents( HvLpIndex targetLp,
+ HvLpEvent_Type type,
+ unsigned size,
+ unsigned count,
+ MFCompleteHandler hdlr,
+ void * userToken )
+{
+ struct StackElement * newElement = newStackElement();
+ int rc = 0;
+
+ if ( newElement == NULL )
+ rc = -ENOMEM;
+ else {
+ union SafeCast mine;
+ mine.ptr = userToken;
+ newElement->event.xHvLpEvent.xSubtype = 4;
+ newElement->event.xHvLpEvent.xCorrelationToken = mine.ptrAsU64;
+ /* "MFMA" subtype tag */
+ newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('M'<<8)+('A'<<0);
+ newElement->event.xUnion.xAllocData.xTargetLp = targetLp;
+ newElement->event.xUnion.xAllocData.xType = type;
+ newElement->event.xUnion.xAllocData.xSize = size;
+ newElement->event.xUnion.xAllocData.xCount = count;
+ newElement->hdlr = hdlr;
+ rc = signalEvent(newElement);
+ }
+
+ if ( (rc != 0) && (hdlr != NULL) )
+ (*hdlr)( userToken, rc );
+}
+
+/*
+ * Global kernel interface to unseed and deallocate events already in
+ * Hypervisor.
+ */
+/* Asynchronous counterpart of mf_allocateLpEvents (subtype 5, "MFMD");
+ * same completion-handler contract. */
+void mf_deallocateLpEvents( HvLpIndex targetLp,
+ HvLpEvent_Type type,
+ unsigned count,
+ MFCompleteHandler hdlr,
+ void * userToken )
+{
+ struct StackElement * newElement = newStackElement();
+ int rc = 0;
+
+ if ( newElement == NULL )
+ rc = -ENOMEM;
+ else {
+ union SafeCast mine;
+ mine.ptr = userToken;
+ newElement->event.xHvLpEvent.xSubtype = 5;
+ newElement->event.xHvLpEvent.xCorrelationToken = mine.ptrAsU64;
+ newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('M'<<8)+('D'<<0);
+ newElement->event.xUnion.xAllocData.xTargetLp = targetLp;
+ newElement->event.xUnion.xAllocData.xType = type;
+ newElement->event.xUnion.xAllocData.xCount = count;
+ newElement->hdlr = hdlr;
+ rc = signalEvent(newElement);
+ }
+
+ if ( (rc != 0) && (hdlr != NULL) )
+ (*hdlr)( userToken, rc );
+}
+
+/*
+ * Global kernel interface to tell the VSP object in the primary
+ * partition to power this partition off.
+ */
+/* Does not return: spins until the VSP pulls the plug. */
+void mf_powerOff( void )
+{
+ printk( KERN_ALERT "mf.c: Down it goes...\n" );
+ signalCEMsg( "\x00\x00\x00\x4D\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
+ for (;;);
+}
+
+/*
+ * Global kernel interface to tell the VSP object in the primary
+ * partition to reboot this partition.
+ */
+/* Does not return: spins until the VSP restarts the partition. */
+void mf_reboot( void )
+{
+ printk( KERN_ALERT "mf.c: Preparing to bounce...\n" );
+ signalCEMsg( "\x00\x00\x00\x4E\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
+ for (;;);
+}
+
+/*
+ * Display a single word SRC onto the VSP control panel.
+ */
+/* The 32-bit word is packed big-endian into bytes 8..11 of the CE
+ * message template. */
+void mf_displaySrc( u32 word )
+{
+ u8 ce[12];
+
+ memcpy( ce, "\x00\x00\x00\x4A\x00\x00\x00\x01\x00\x00\x00\x00", 12 );
+ ce[8] = word>>24;
+ ce[9] = word>>16;
+ ce[10] = word>>8;
+ ce[11] = word;
+ signalCEMsg( ce, NULL );
+}
+
+/*
+ * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
+ */
+/* Patches 'value' into the 72-byte SRC template both as raw bytes
+ * (offsets 6-7) and as ASCII hex over the "xxxx" in "PROGxxxx", then
+ * DMAs the SRC alongside the CE message. */
+void mf_displayProgress( u16 value )
+{
+ u8 ce[12];
+ u8 src[72];
+
+ memcpy( ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12 );
+ memcpy( src,
+ "\x01\x00\x00\x01"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "PROGxxxx"
+ " ",
+ 72 );
+ src[6] = value>>8;
+ src[7] = value&255;
+ src[44] = "0123456789ABCDEF"[(value>>12)&15];
+ src[45] = "0123456789ABCDEF"[(value>>8)&15];
+ src[46] = "0123456789ABCDEF"[(value>>4)&15];
+ src[47] = "0123456789ABCDEF"[value&15];
+ dmaAndSignalCEMsg( ce, NULL, src, sizeof(src), 9*64*1024 );
+}
+
+/*
+ * Clear the VSP control panel. Used to "erase" an SRC that was
+ * previously displayed.
+ */
+void mf_clearSrc( void )
+{
+ signalCEMsg( "\x00\x00\x00\x4B\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
+}
+
+/*
+ * Initialization code here.
+ */
+/* Set up the send queue (seeding the free list with the preallocated
+ * elements), register the MF event handler with the hypervisor, ack
+ * the "virtual continue", and hook the /proc interface. */
+void mf_init( void )
+{
+ int i;
+
+ /* initialize */
+ spin_lock_init( &spinlock );
+ for ( i = 0; i < sizeof(prealloc)/sizeof(*prealloc); ++i )
+ free( &prealloc[i] );
+ HvLpEvent_registerHandler( HvLpEvent_Type_MachineFac, &hvHandler );
+
+ /* virtual continue ack */
+ signalCEMsg( "\x00\x00\x00\x57\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
+
+ /* initialization complete */
+ printk( KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities initialized\n" );
+
+ iSeries_proc_callback(&mf_proc_init);
+}
+
+/* Select the IPL side ('A'/'B'/'C', anything else -> D) for the next
+ * boot via synchronous VSP command 10.  The command's return code is
+ * not propagated to the caller. */
+void mf_setSide(char side)
+{
+ int rc = 0;
+ u64 newSide = 0;
+ struct VspCmdData myVspCmd;
+
+ memset(&myVspCmd, 0, sizeof(myVspCmd));
+ if (side == 'A')
+ newSide = 0;
+ else if (side == 'B')
+ newSide = 1;
+ else if (side == 'C')
+ newSide = 2;
+ else
+ newSide = 3;
+
+ myVspCmd.xSubData.xFunction02SelectIplTypeIn.xIplType = newSide;
+ myVspCmd.xCmd = 10;
+
+ rc = signalVspInstruction(&myVspCmd);
+}
+
+/* Query the current IPL side via synchronous VSP command 2.  Returns
+ * 'A'/'B'/'C'/'D' on success, or ' ' if the command could not be
+ * issued or the VSP reported an error. */
+char mf_getSide(void)
+{
+ char returnValue = ' ';
+ int rc = 0;
+ struct VspCmdData myVspCmd;
+
+ memset(&myVspCmd, 0, sizeof(myVspCmd));
+ myVspCmd.xCmd = 2;
+ myVspCmd.xSubData.xFunction02SelectIplTypeIn.xIplType = 0;
+ mb();
+ rc = signalVspInstruction(&myVspCmd);
+
+ if (rc != 0)
+ {
+ return returnValue;
+ } else {
+ if (myVspCmd.xRc == 0)
+ {
+ if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 0)
+ returnValue = 'A';
+ else if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 1)
+ returnValue = 'B';
+ else if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 2)
+ returnValue = 'C';
+ else
+ returnValue = 'D';
+ }
+ }
+
+ return returnValue;
+}
+
+void mf_getSrcHistory(char *buffer, int size)
+{
+ /* NOTE(review): this is currently a stub — the entire implementation
+  * below is commented out, so `buffer` is returned untouched.  Left in
+  * place (not deleted) pending a decision on reviving the feature. */
+ /* struct IplTypeReturnStuff returnStuff;
+ struct StackElement * newElement = newStackElement();
+ int rc = 0;
+ char *pages[4];
+
+ pages[0] = kmalloc(4096, GFP_ATOMIC);
+ pages[1] = kmalloc(4096, GFP_ATOMIC);
+ pages[2] = kmalloc(4096, GFP_ATOMIC);
+ pages[3] = kmalloc(4096, GFP_ATOMIC);
+ if (( newElement == NULL ) || (pages[0] == NULL) || (pages[1] == NULL) || (pages[2] == NULL) || (pages[3] == NULL))
+ rc = -ENOMEM;
+ else
+ {
+ returnStuff.xType = 0;
+ returnStuff.xRc = 0;
+ returnStuff.xDone = 0;
+ newElement->event.xHvLpEvent.xSubtype = 6;
+ newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('V'<<8)+('I'<<0);
+ newElement->event.xUnion.xVspCmd.xEvent = &returnStuff;
+ newElement->event.xUnion.xVspCmd.xCmd = 4;
+ newElement->event.xUnion.xVspCmd.xLpIndex = HvLpConfig_getLpIndex();
+ newElement->event.xUnion.xVspCmd.xRc = 0xFF;
+ newElement->event.xUnion.xVspCmd.xReserved1 = 0;
+ newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[0] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[0]));
+ newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[1] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[1]));
+ newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[2] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[2]));
+ newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[3] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[3]));
+ mb();
+ rc = signalEvent(newElement);
+ }
+
+ if (rc != 0)
+ {
+ return;
+ }
+ else
+ {
+ while (returnStuff.xDone != 1)
+ {
+ udelay(10);
+ }
+
+ if (returnStuff.xRc == 0)
+ {
+ memcpy(buffer, pages[0], size);
+ }
+ }
+
+ kfree(pages[0]);
+ kfree(pages[1]);
+ kfree(pages[2]);
+ kfree(pages[3]);*/
+}
+
+/*
+ * Store a new kernel command line for IPL side `side` (VSP command
+ * 31).  `cmdline` is a user-space pointer — this is called from the
+ * /proc write path — so it is staged into a DMA-consistent buffer
+ * before being handed to the service processor by TCE index.
+ *
+ * Fix: the original ignored copy_from_user()'s return value; a
+ * faulting user buffer would silently DMA uninitialized data to the
+ * service processor.  It also held the result of
+ * signalVspInstruction() in a local that was never read.
+ */
+void mf_setCmdLine(const char *cmdline, int size, u64 side)
+{
+ struct VspCmdData myVspCmd;
+ dma_addr_t dma_addr = 0;
+ char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
+
+ if (page == NULL) {
+  printk(KERN_ERR "mf.c: couldn't allocate memory to set command line\n");
+  return;
+ }
+
+ if (copy_from_user(page, cmdline, size)) {
+  printk(KERN_ERR "mf.c: couldn't copy command line from user space\n");
+  pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
+  return;
+ }
+
+ memset(&myVspCmd, 0, sizeof(myVspCmd));
+ myVspCmd.xCmd = 31;
+ myVspCmd.xSubData.xSetKernelCmdLineIn.xToken = dma_addr;
+ myVspCmd.xSubData.xSetKernelCmdLineIn.xAddressType = HvLpDma_AddressType_TceIndex;
+ myVspCmd.xSubData.xSetKernelCmdLineIn.xSide = side;
+ myVspCmd.xSubData.xSetKernelCmdLineIn.xTransferLength = size;
+ mb();
+ (void) signalVspInstruction(&myVspCmd);
+
+ pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
+}
+
+/*
+ * Fetch the saved kernel command line for IPL side `side` (VSP command
+ * 33) directly into `cmdline` via a single-use DMA mapping.
+ *
+ * Returns the number of bytes the service processor reports it
+ * transferred; on any failure the original *size is returned and the
+ * buffer is simply left zeroed.
+ *
+ * NOTE(review): *size itself is never written back, and a failed
+ * signalVspInstruction() is indistinguishable from a full-length
+ * transfer — confirm the /proc reader copes with that.
+ */
+int mf_getCmdLine(char *cmdline, int *size, u64 side)
+{
+ struct VspCmdData myVspCmd;
+ int rc = 0;
+ int len = *size;
+ dma_addr_t dma_addr = pci_map_single(iSeries_vio_dev, cmdline, *size, PCI_DMA_FROMDEVICE);
+
+ memset(cmdline, 0, *size);
+ memset(&myVspCmd, 0, sizeof(myVspCmd));
+ myVspCmd.xCmd = 33;
+ myVspCmd.xSubData.xGetKernelCmdLineIn.xToken = dma_addr;
+ myVspCmd.xSubData.xGetKernelCmdLineIn.xAddressType = HvLpDma_AddressType_TceIndex;
+ myVspCmd.xSubData.xGetKernelCmdLineIn.xSide = side;
+ myVspCmd.xSubData.xGetKernelCmdLineIn.xTransferLength = *size;
+ mb();
+ rc = signalVspInstruction(&myVspCmd);
+
+ if ( ! rc ) {
+
+ if (myVspCmd.xRc == 0)
+ {
+ len = myVspCmd.xSubData.xGetKernelCmdLineOut.xTransferLength;
+ }
+ /* else
+ {
+ memcpy(cmdline, "Bad cmdline", 11);
+ }
+ */
+ }
+
+ pci_unmap_single(iSeries_vio_dev, dma_addr, *size, PCI_DMA_FROMDEVICE);
+
+ return len;
+}
+
+
+/*
+ * Store one chunk of a vmlinux image at `offset` on IPL side `side`
+ * (VSP command 30).  `buffer` is a user-space pointer (from the /proc
+ * write path); it is staged into a DMA-consistent page first.
+ *
+ * Returns 0 on success, -ENOMEM if the staging buffer cannot be
+ * allocated or the service processor rejects the command, -EFAULT if
+ * the user buffer faults, or the raw signalVspInstruction() error.
+ *
+ * Fix: the original ignored copy_from_user()'s return value.
+ * (NOTE(review): the xSubData member really is named
+ * xGetKernelImageIn even for this "set" command.)
+ */
+int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
+{
+ struct VspCmdData myVspCmd;
+ int rc = 0;
+ dma_addr_t dma_addr = 0;
+ char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
+
+ if (page == NULL) {
+  printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
+  return -ENOMEM;
+ }
+
+ if (copy_from_user(page, buffer, size)) {
+  pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
+  return -EFAULT;
+ }
+
+ memset(&myVspCmd, 0, sizeof(myVspCmd));
+ myVspCmd.xCmd = 30;
+ myVspCmd.xSubData.xGetKernelImageIn.xToken = dma_addr;
+ myVspCmd.xSubData.xGetKernelImageIn.xAddressType = HvLpDma_AddressType_TceIndex;
+ myVspCmd.xSubData.xGetKernelImageIn.xSide = side;
+ myVspCmd.xSubData.xGetKernelImageIn.xOffset = offset;
+ myVspCmd.xSubData.xGetKernelImageIn.xTransferLength = size;
+ mb();
+ rc = signalVspInstruction(&myVspCmd);
+
+ if (rc == 0) {
+  /* Command delivered; map a service-processor rejection to -ENOMEM
+   * (historical choice, kept for compatibility). */
+  if (myVspCmd.xRc != 0)
+   rc = -ENOMEM;
+ }
+
+ pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
+
+ return rc;
+}
+
+/*
+ * Read one chunk of the stored vmlinux image at `offset` on IPL side
+ * `side` (VSP command 32) directly into `buffer` via DMA.
+ *
+ * On success *size is updated to the length the service processor
+ * actually transferred and 0 is returned; a service-processor
+ * rejection yields -ENOMEM, a delivery failure returns the raw
+ * signalVspInstruction() code.
+ *
+ * NOTE(review): pci_unmap_single() is called with *size, which may
+ * have been shrunk above — the mapping was created with the original
+ * length; confirm the mismatch is harmless on this platform.
+ */
+int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
+{
+ struct VspCmdData myVspCmd;
+ int rc = 0;
+ int len = *size;
+
+ dma_addr_t dma_addr = pci_map_single(iSeries_vio_dev, buffer, *size, PCI_DMA_FROMDEVICE);
+
+ memset(buffer, 0, len);
+
+ memset(&myVspCmd, 0, sizeof(myVspCmd));
+ myVspCmd.xCmd = 32;
+ myVspCmd.xSubData.xGetKernelImageIn.xToken = dma_addr;
+ myVspCmd.xSubData.xGetKernelImageIn.xAddressType = HvLpDma_AddressType_TceIndex;
+ myVspCmd.xSubData.xGetKernelImageIn.xSide = side;
+ myVspCmd.xSubData.xGetKernelImageIn.xOffset = offset;
+ myVspCmd.xSubData.xGetKernelImageIn.xTransferLength = len;
+ mb();
+ rc = signalVspInstruction(&myVspCmd);
+
+ if (rc == 0)
+ {
+ if (myVspCmd.xRc == 0)
+ {
+ *size = myVspCmd.xSubData.xGetKernelImageOut.xTransferLength;
+ } else {
+ rc = -ENOMEM;
+ }
+ }
+
+ pci_unmap_single(iSeries_vio_dev, dma_addr, *size, PCI_DMA_FROMDEVICE);
+
+ return rc;
+}
+
+/*
+ * Set the service-processor RTC from a UNIX timestamp (seconds since
+ * the epoch).  Converts via to_tm() and delegates to mf_setRtc();
+ * returns mf_setRtc()'s result.
+ */
+int mf_setRtcTime(unsigned long time)
+{
+ struct rtc_time tm;
+
+ to_tm(time, &tm);
+
+ return mf_setRtc( &tm );
+}
+
+/* Completion context for the asynchronous RTC read (see mf_getRtc). */
+struct RtcTimeData
+{
+ struct semaphore *xSemaphore; /* posted by getRtcTimeComplete() */
+ struct CeMsgData xCeMsg; /* copy of the CE response message */
+ int xRc; /* set to 0 when the response arrives */
+};
+
+/*
+ * CE-message completion handler for the RTC read: copy the response
+ * into the caller's RtcTimeData and wake the waiter blocked in
+ * mf_getRtc().
+ */
+void getRtcTimeComplete(void * token, struct CeMsgData *ceMsg)
+{
+ struct RtcTimeData *rtc = (struct RtcTimeData *)token;
+
+ memcpy(&(rtc->xCeMsg), ceMsg, sizeof(rtc->xCeMsg));
+
+ rtc->xRc = 0;
+ up(rtc->xSemaphore);
+}
+
+/* Countdown used by the "nasty hack" below; starts at 1 so only the
+ * first call is perturbed. */
+static unsigned long lastsec = 1;
+
+/*
+ * Derive the current time from the BCD timestamp the service
+ * processor recorded at IPL start (xSpCommArea.xBcdTimeAtIplStart),
+ * plus the seconds elapsed since boot (jiffies/HZ).  Always returns 0.
+ */
+int mf_getRtcTime(unsigned long *time)
+{
+/* unsigned long usec, tsec; */
+
+ u32 dataWord1 = *((u32 *)(&xSpCommArea.xBcdTimeAtIplStart));
+ u32 dataWord2 = *(((u32 *)&(xSpCommArea.xBcdTimeAtIplStart)) + 1);
+ int year = 1970;
+ int year1 = ( dataWord1 >> 24 ) & 0x000000FF; /* century (e.g. 0x20) */
+ int year2 = ( dataWord1 >> 16 ) & 0x000000FF; /* year within century */
+ int sec = ( dataWord1 >> 8 ) & 0x000000FF;
+ int min = dataWord1 & 0x000000FF;
+ int hour = ( dataWord2 >> 24 ) & 0x000000FF;
+ /* NOTE(review): bits 16-23 of dataWord2 are skipped here — presumably
+  * a pad/unused byte in the BCD layout; confirm against the SP spec. */
+ int day = ( dataWord2 >> 8 ) & 0x000000FF;
+ int mon = dataWord2 & 0x000000FF;
+
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year1);
+ BCD_TO_BIN(year2);
+ year = year1 * 100 + year2;
+
+ *time = mktime(year, mon, day, hour, min, sec);
+
+ /* Add seconds elapsed since boot to the IPL-start timestamp. */
+ *time += ( jiffies / HZ );
+
+ /* Now THIS is a nasty hack!
+ * It ensures that the first two calls to mf_getRtcTime get different
+ * answers. That way the loop in init_time (time.c) will not think
+ * the clock is stuck.
+ */
+ if ( lastsec ) {
+ *time -= lastsec;
+ --lastsec;
+ }
+
+ return 0;
+
+}
+
+/*
+ * Read the RTC from the service processor (CE command 0x40) and
+ * decode the BCD response into *tm.  Blocks on a semaphore until the
+ * completion handler (getRtcTimeComplete) fires.  If the TOD clock is
+ * reported unset (response byte 2 == 0xa9 or 0xaf) a fixed default
+ * time is written back to the SP first.  Returns 0 on success or the
+ * failing return code.
+ */
+int mf_getRtc( struct rtc_time * tm )
+{
+
+ struct CeMsgCompleteData ceComplete;
+ struct RtcTimeData rtcData;
+ int rc = 0;
+ DECLARE_MUTEX_LOCKED(Semaphore);
+
+ memset(&ceComplete, 0, sizeof(ceComplete));
+ memset(&rtcData, 0, sizeof(rtcData));
+
+ rtcData.xSemaphore = &Semaphore;
+
+ ceComplete.xHdlr = &getRtcTimeComplete;
+ ceComplete.xToken = (void *)&rtcData;
+
+ rc = signalCEMsg( "\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00", &ceComplete );
+
+ if ( rc == 0 )
+ {
+ /* Wait for getRtcTimeComplete() to post the semaphore. */
+ down(&Semaphore);
+
+ if ( rtcData.xRc == 0)
+ {
+ if ( ( rtcData.xCeMsg.xCEMsg[2] == 0xa9 ) ||
+ ( rtcData.xCeMsg.xCEMsg[2] == 0xaf ) ) {
+ /* TOD clock is not set */
+ tm->tm_sec = 1;
+ tm->tm_min = 1;
+ tm->tm_hour = 1;
+ tm->tm_mday = 10;
+ tm->tm_mon = 8;
+ tm->tm_year = 71;
+ mf_setRtc( tm );
+ }
+ /* NOTE(review): the block below is NOT an else — after seeding the
+  * clock above we still fall through and decode the original
+  * (unset-clock) response, overwriting the defaults just written
+  * into *tm.  Confirm that is intended. */
+ {
+ u32 dataWord1 = *((u32 *)(rtcData.xCeMsg.xCEMsg+4));
+ u32 dataWord2 = *((u32 *)(rtcData.xCeMsg.xCEMsg+8));
+ u8 year = (dataWord1 >> 16 ) & 0x000000FF;
+ u8 sec = ( dataWord1 >> 8 ) & 0x000000FF;
+ u8 min = dataWord1 & 0x000000FF;
+ u8 hour = ( dataWord2 >> 24 ) & 0x000000FF;
+ u8 day = ( dataWord2 >> 8 ) & 0x000000FF;
+ u8 mon = dataWord2 & 0x000000FF;
+
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+
+ /* Two-digit year: treat 00-69 as 2000-2069 (tm_year is 1900-based). */
+ if ( year <= 69 )
+ year += 100;
+
+ tm->tm_sec = sec;
+ tm->tm_min = min;
+ tm->tm_hour = hour;
+ tm->tm_mday = day;
+ tm->tm_mon = mon;
+ tm->tm_year = year;
+ }
+ } else {
+ /* Completion reported an error: hand back a fixed sentinel time. */
+ rc = rtcData.xRc;
+ tm->tm_sec = 0;
+ tm->tm_min = 0;
+ tm->tm_hour = 0;
+ tm->tm_mday = 15;
+ tm->tm_mon = 5;
+ tm->tm_year = 52;
+
+ }
+ tm->tm_wday = 0;
+ tm->tm_yday = 0;
+ tm->tm_isdst = 0;
+
+ }
+
+ return rc;
+
+}
+
+/*
+ * Set the service-processor RTC (CE command 0x41) from *tm.  The
+ * twelve-byte command is patched in place with BCD-encoded fields:
+ * bytes 4-5 century/year, 6 seconds, 7 minutes, 8 hours, 10 day,
+ * 11 month (byte 9 stays zero).  tm_mon is 0-based, hence the +1.
+ * Returns signalCEMsg()'s result.
+ */
+int mf_setRtc(struct rtc_time * tm)
+{
+ char ceTime[12] = "\x00\x00\x00\x41\x00\x00\x00\x00\x00\x00\x00\x00";
+ int rc = 0;
+ u8 day, mon, hour, min, sec, y1, y2;
+ unsigned year;
+
+ year = 1900 + tm->tm_year;
+ y1 = year / 100;
+ y2 = year % 100;
+
+ sec = tm->tm_sec;
+ min = tm->tm_min;
+ hour = tm->tm_hour;
+ day = tm->tm_mday;
+ mon = tm->tm_mon + 1;
+
+ BIN_TO_BCD(sec);
+ BIN_TO_BCD(min);
+ BIN_TO_BCD(hour);
+ BIN_TO_BCD(mon);
+ BIN_TO_BCD(day);
+ BIN_TO_BCD(y1);
+ BIN_TO_BCD(y2);
+
+ ceTime[4] = y1;
+ ceTime[5] = y2;
+ ceTime[6] = sec;
+ ceTime[7] = min;
+ ceTime[8] = hour;
+ ceTime[10] = day;
+ ceTime[11] = mon;
+
+ rc = signalCEMsg( ceTime, NULL );
+
+ return rc;
+}
+
+
+
--- /dev/null
+/*
+ * mf_proc.c
+ * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity: */
+/* End Change Activity */
+
+#ifndef _MF_PROC_H
+#include <asm/iSeries/mf_proc.h>
+#endif
+#ifndef MF_H_INCLUDED
+#include <asm/iSeries/mf.h>
+#endif
+#include <asm/uaccess.h>
+
+/* Root of the /proc/iSeries/mf tree, created by mf_proc_init(). */
+static struct proc_dir_entry *mf_proc_root = NULL;
+
+/* Forward declarations of the read_proc/write_proc handlers below.
+ * `data` carries the IPL side index (0=A .. 3=D) for the per-side
+ * cmdline and vmlinux entries. */
+int proc_mf_dump_cmdline
+(char *page, char **start, off_t off, int count, int *eof, void *data);
+
+int proc_mf_dump_vmlinux
+(char *page, char **start, off_t off, int count, int *eof, void *data);
+
+int proc_mf_dump_side
+(char *page, char **start, off_t off, int count, int *eof, void *data);
+
+int proc_mf_change_side
+(struct file *file, const char *buffer, unsigned long count, void *data);
+
+int proc_mf_dump_src
+(char *page, char **start, off_t off, int count, int *eof, void *data);
+int proc_mf_change_src (struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_mf_change_cmdline(struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_mf_change_vmlinux(struct file *file, const char *buffer, unsigned long count, void *data);
+
+
+/*
+ * Helper: create one /proc entry under `parent` with the given mode,
+ * side-index cookie and read/write handlers.  Returns the entry, or
+ * NULL on failure.
+ */
+static struct proc_dir_entry *mf_proc_create_entry(
+ const char *name, mode_t mode, struct proc_dir_entry *parent,
+ void *data,
+ int (*read_proc)(char *, char **, off_t, int, int *, void *),
+ int (*write_proc)(struct file *, const char *, unsigned long, void *))
+{
+ struct proc_dir_entry *ent = create_proc_entry(name, mode, parent);
+
+ if (!ent)
+  return NULL;
+ ent->nlink = 1;
+ ent->data = data;
+ ent->read_proc = read_proc;
+ ent->write_proc = write_proc;
+ return ent;
+}
+
+/*
+ * Build the /proc/iSeries/mf tree: one directory per IPL side (A-D),
+ * each with "cmdline" and "vmlinux" entries, plus top-level "side"
+ * and "src" entries.  As in the original, creation stops silently at
+ * the first failure.
+ *
+ * Refactor: the original open-coded the same create_proc_entry +
+ * field-assignment sequence ten times; the entries created (names,
+ * modes, data cookies, handlers — including the read-only D-side
+ * vmlinux) are unchanged.
+ */
+void mf_proc_init(struct proc_dir_entry *iSeries_proc)
+{
+ static const char * const side_names[] = { "A", "B", "C", "D" };
+ struct proc_dir_entry *side_dir;
+ int i;
+
+ mf_proc_root = proc_mkdir("mf", iSeries_proc);
+ if (!mf_proc_root) return;
+
+ for (i = 0; i < 4; ++i) {
+  side_dir = proc_mkdir(side_names[i], mf_proc_root);
+  if (!side_dir) return;
+
+  if (!mf_proc_create_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, side_dir,
+    (void *)(long)i, proc_mf_dump_cmdline, proc_mf_change_cmdline))
+   return;
+
+  if (i == 3) {
+   /* The D side's vmlinux image is read-only. */
+   if (!mf_proc_create_entry("vmlinux", S_IFREG|S_IRUSR, side_dir,
+     (void *)(long)i, proc_mf_dump_vmlinux, NULL))
+    return;
+  } else {
+   if (!mf_proc_create_entry("vmlinux", S_IFREG|S_IRUSR|S_IWUSR, side_dir,
+     (void *)(long)i, proc_mf_dump_vmlinux, proc_mf_change_vmlinux))
+    return;
+  }
+ }
+
+ if (!mf_proc_create_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
+   (void *)0, proc_mf_dump_side, proc_mf_change_side))
+  return;
+
+ mf_proc_create_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
+  (void *)0, proc_mf_dump_src, proc_mf_change_src);
+}
+
+/*
+ * /proc read handler for a per-side kernel command line.  Fetches the
+ * stored line via mf_getCmdLine(), trims trailing NULs/spaces, and
+ * guarantees "\n\0" termination before applying the usual read_proc
+ * offset/count bookkeeping.  `data` is the IPL side index.
+ *
+ * Fix: when mf_getCmdLine() returned 0 (empty command line) the
+ * original set p = page - 1 and then dereferenced it — an
+ * out-of-bounds access.  Zero/negative lengths now return EOF.
+ */
+int proc_mf_dump_cmdline
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = count;
+ char *p;
+
+ len = mf_getCmdLine(page, &len, (u64)data);
+ if (len <= 0) {
+  *eof = 1;
+  return 0;
+ }
+
+ /* Trim trailing NULs and blanks, then force "\n\0" termination. */
+ p = page + len - 1;
+ while ( p > page ) {
+  if ( (*p == 0) || (*p == ' ') )
+   --p;
+  else
+   break;
+ }
+ if ( *p != '\n' ) {
+  ++p;
+  *p = '\n';
+ }
+ ++p;
+ *p = 0;
+ len = p - page;
+
+ /* Standard read_proc windowing on the generated text. */
+ len -= off;
+ if (len < count) {
+  *eof = 1;
+  if (len <= 0)
+   return 0;
+ } else
+  len = count;
+ *start = page + off;
+ return len;
+}
+
+/*
+ * /proc read handler for a per-side stored vmlinux image.  Reads one
+ * chunk at offset `off` via mf_getVmlinuxChunk(); `data` is the IPL
+ * side index.  A zero-length transfer or any error is reported as EOF.
+ * Requires CAP_SYS_ADMIN.
+ *
+ * NOTE(review): the printk()s below have no KERN_ level and look like
+ * leftover debug output — candidates for removal or demotion.
+ */
+int proc_mf_dump_vmlinux
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int sizeToGet = count;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0)
+ {
+ if (sizeToGet != 0)
+ {
+ *start = page + off;
+ printk("mf_proc.c: got count %d off %d\n", sizeToGet, (int)off);
+ return sizeToGet;
+ } else {
+ printk("mf_proc.c: eof\n");
+ *eof = 1;
+ return 0;
+ }
+ } else {
+ printk("mf_proc.c: eof\n");
+ *eof = 1;
+ return 0;
+ }
+}
+
+
+/*
+ * /proc read handler for the current IPL side: emits a single line
+ * such as "A\n" (or " \n" if mf_getSide() failed), with standard
+ * read_proc offset/count clamping.
+ */
+int proc_mf_dump_side
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+
+ char mf_current_side = mf_getSide();
+ len = sprintf(page, "%c\n", mf_current_side);
+
+ if (len <= off+count) *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len>count) len = count;
+ if (len<0) len = 0;
+ return len;
+}
+
+/*
+ * /proc write handler for the IPL side: accepts a single character
+ * 'A'-'D' and passes it to mf_setSide().  Requires CAP_SYS_ADMIN.
+ *
+ * Fix: in a write_proc handler `buffer` is a user-space pointer; the
+ * original dereferenced it directly and never validated `count`.  The
+ * byte is now fetched with copy_from_user().
+ */
+int proc_mf_change_side(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char side;
+
+ if (!capable(CAP_SYS_ADMIN))
+  return -EACCES;
+
+ if (count < 1)
+  return -EINVAL;
+ if (copy_from_user(&side, buffer, 1))
+  return -EFAULT;
+
+ if ((side != 'A') &&
+     (side != 'B') &&
+     (side != 'C') &&
+     (side != 'D'))
+ {
+  printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
+  return -EINVAL;
+ }
+
+ mf_setSide(side);
+
+ return count;
+}
+
+/*
+ * /proc read handler for the SRC history.  Delegates to
+ * mf_getSrcHistory() — which is currently a stub that leaves the page
+ * untouched — then applies the usual read_proc windowing over `count`
+ * bytes.
+ */
+int proc_mf_dump_src
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+ mf_getSrcHistory(page, count);
+ len = count;
+ len -= off;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+ *start = page + off;
+ return len;
+}
+
+/*
+ * /proc write handler for the panel SRC: a single NUL byte clears the
+ * displayed SRC, four or more bytes display the first four bytes as
+ * the SRC word.  Requires CAP_SYS_ADMIN.
+ *
+ * Fixes: `buffer` is a user-space pointer in a write_proc handler —
+ * the original dereferenced it directly (including an unaligned
+ * 4-byte load), and for a 1-byte non-NUL write it read 3 bytes past
+ * the end of the user data; that case is now rejected with -EINVAL.
+ */
+int proc_mf_change_src(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ if (!capable(CAP_SYS_ADMIN))
+  return -EACCES;
+
+ if ((count < 4) && (count != 1))
+ {
+  printk(KERN_ERR "mf_proc: invalid src\n");
+  return -EINVAL;
+ }
+
+ if (count == 1) {
+  char c;
+
+  if (copy_from_user(&c, buffer, 1))
+   return -EFAULT;
+  if (c != '\0')
+   return -EINVAL;
+  mf_clearSrc();
+ } else {
+  u32 src;
+
+  if (copy_from_user(&src, buffer, 4))
+   return -EFAULT;
+  mf_displaySrc(src);
+ }
+
+ return count;
+}
+
+/*
+ * /proc write handler for a per-side kernel command line.  `buffer`
+ * is the raw user-space pointer; mf_setCmdLine() performs the
+ * copy_from_user() into its DMA staging page.  `data` is the IPL side
+ * index.  Requires CAP_SYS_ADMIN.
+ */
+int proc_mf_change_cmdline(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ mf_setCmdLine(buffer, count, (u64)data);
+
+ return count;
+}
+
+/*
+ * /proc write handler for a per-side vmlinux image: stores the user
+ * buffer at the file's current offset (mf_setVmlinuxChunk() does the
+ * copy_from_user()).  `data` is the IPL side index.  Requires
+ * CAP_SYS_ADMIN.
+ *
+ * Fix: the original discarded mf_setVmlinuxChunk()'s return code and
+ * advanced f_pos even when the chunk was never stored; errors are now
+ * propagated and f_pos only moves on success.
+ */
+int proc_mf_change_vmlinux(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+  return -EACCES;
+
+ rc = mf_setVmlinuxChunk(buffer, count, file->f_pos, (u64)data);
+ if (rc < 0)
+  return rc;
+
+ file->f_pos += count;
+
+ return count;
+}
--- /dev/null
+/*
+ * arch/ppc/kernel/misc.S
+ *
+ *
+ *
+ * This file contains miscellaneous low-level functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include "ppc_asm.h"
+
+ .text
+
+/*
+ * Returns (address we're running at) - (address we were linked at)
+ * for use before the text and data are mapped to KERNELBASE.
+ *
+ * Works by taking the runtime address of local label 1 (via bl/mflr)
+ * and comparing it with the link-time address loaded by LOADADDR.
+ * Clobbers r0, r3, r4; preserves the caller's LR.
+ */
+
+_GLOBAL(reloc_offset)
+ mflr r0
+ bl 1f
+1: mflr r3
+ LOADADDR(r4,1b)
+ sub r3,r4,r3
+ mtlr r0
+ blr
+
+/* Trivial accessors: each returns the named special register (or the
+ * stack pointer) in r3. */
+_GLOBAL(get_msr)
+ mfmsr r3
+ blr
+
+_GLOBAL(get_dar)
+ mfdar r3
+ blr
+
+_GLOBAL(get_srr0)
+ mfsrr0 r3
+ blr
+
+_GLOBAL(get_srr1)
+ mfsrr1 r3
+ blr
+
+/* Return the current stack pointer (r1). */
+_GLOBAL(get_sp)
+ mr r3,r1
+ blr
+
+#ifdef CONFIG_PPC_ISERIES
+/*
+ * iSeries "soft" interrupt enable/disable: the enabled state lives in
+ * the byte PACAPROCENABLED of the per-processor PACA (reached via
+ * SPRG3), not in the hardware MSR.
+ */
+/* unsigned long __no_use_save_flags(void) */
+_GLOBAL(__no_use_save_flags)
+ mfspr r4,SPRG3
+ lbz r3,PACAPROCENABLED(r4)
+ blr
+
+/* void __no_use_restore_flags(unsigned long flags) */
+_GLOBAL(__no_use_restore_flags)
+/*
+ * Just set/clear the MSR_EE bit through restore/flags but do not
+ * change anything else. This is needed by the RT system and makes
+ * sense anyway.
+ * -- Cort
+ */
+ mfspr r6,SPRG3
+ lbz r5,PACAPROCENABLED(r6)
+ /* Check if things are setup the way we want _already_. */
+ cmpw 0,r3,r5
+ beqlr
+ /* are we enabling interrupts? */
+ cmpi 0,r3,0
+ stb r3,PACAPROCENABLED(r6)
+ beqlr
+ /* Check pending interrupts */
+ CHECKANYINT(r4,r5)
+ beqlr
+
+ /*
+ * Handle pending interrupts in interrupt context
+ */
+ li r0,0x5555
+ sc
+ blr
+
+/* Soft-disable interrupts; returns the previous enabled state in r3. */
+_GLOBAL(__no_use_cli)
+ mfspr r5,SPRG3
+ lbz r3,PACAPROCENABLED(r5)
+ li r4,0
+ stb r4,PACAPROCENABLED(r5)
+ blr /* Done */
+
+/* Soft-enable interrupts, then service anything that became pending
+ * while disabled (via the 0x5555 pseudo-syscall). */
+_GLOBAL(__no_use_sti)
+ mfspr r6,SPRG3
+ li r3,1
+ stb r3,PACAPROCENABLED(r6)
+
+ /* Check for pending interrupts
+ * A decrementer, IPI or PMC interrupt may have occurred
+ * while we were in the hypervisor (which enables)
+ */
+ CHECKANYINT(r4,r5)
+ beqlr
+
+ /*
+ * Handle pending interrupts in interrupt context
+ */
+ li r0,0x5555
+ sc
+ blr
+#endif
+/*
+ * Flush instruction cache.
+ */
+_GLOBAL(flush_instruction_cache)
+
+/*
+ * This is called by kgdb code
+ * and should probably go away
+ * to be replaced by invalidating
+ * the cache lines that are actually
+ * modified
+ */
+ /* use invalidate-all bit in HID0
+ * - is this consistent across all 64-bit cpus? -- paulus */
+ /* Sets HID0_ICFI, which on the CPUs this was written for invalidates
+  * the whole I-cache; sync/isync order the update. */
+ mfspr r3,HID0
+ ori r3,r3,HID0_ICFI
+ mtspr HID0,r3
+ sync
+ isync
+ blr
+
+/*
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ *
+ * flush_icache_range(unsigned long start, unsigned long stop)
+ *
+ * flush all bytes from start through stop-1 inclusive
+ *
+ * r3 = start, r4 = stop.  Line sizes and their log-2 values come from
+ * the naca, since i-cache and d-cache line sizes may differ.
+ */
+
+_GLOBAL(flush_icache_range)
+
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ * and in some cases i-cache and d-cache line sizes differ from
+ * each other.
+ */
+ LOADADDR(r10,naca) /* Get Naca address */
+ ld r10,0(r10)
+ lhz r7,DCACHEL1LINESIZE(r10) /* Get cache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lhz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+1: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 1b
+ sync
+
+/* Now invalidate the instruction cache */
+
+ lhz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5
+ lhz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+2: icbi 0,r6
+ add r6,r6,r7
+ bdnz 2b
+ isync
+ blr
+
+/*
+ * Like above, but only do the D-cache.
+ *
+ * flush_dcache_range(unsigned long start, unsigned long stop)
+ *
+ * flush all bytes from start to stop-1 inclusive
+ *
+ * r3 = start, r4 = stop; dcbst each line, then a single sync.
+ */
+_GLOBAL(flush_dcache_range)
+
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ */
+ LOADADDR(r10,naca) /* Get Naca address */
+ ld r10,0(r10)
+ lhz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lhz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+0: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ blr
+
+/*
+ * Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ * void __flush_dcache_icache(void *page)
+ *
+ * r3 = any address within the page (aligned down to 4K below).
+ * Per-page line counts and line sizes come from the naca.
+ */
+_GLOBAL(__flush_dcache_icache)
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ */
+
+/* Flush the dcache */
+ LOADADDR(r7,naca)
+ ld r7,0(r7)
+ clrrdi r3,r3,12 /* Page align */
+ lhz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
+ lhz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
+ mr r6,r3
+ mtctr r4
+0: dcbst 0,r6
+ add r6,r6,r5
+ bdnz 0b
+ sync
+
+/* Now invalidate the icache */
+
+ lhz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
+ lhz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
+ mtctr r4
+1: icbi 0,r3
+ add r3,r3,r5
+ bdnz 1b
+ isync
+ blr
+
+/*
+ * Copy a whole page. Assumes a 4096B page size.
+ *
+ * r3 = destination, r4 = source; both are page-aligned below.
+ * Copies 256 iterations x 16 bytes (two doublewords) per loop,
+ * using pre-decremented bases so ldu/stdu update the pointers.
+ */
+_GLOBAL(copy_page)
+ clrrdi r3,r3,12 /* Page align */
+ clrrdi r4,r4,12 /* Page align */
+ li r5,256
+ mtctr r5
+ addi r3,r3,-8
+ addi r4,r4,-8
+
+1: ld r6,8(r4)
+ ldu r7,16(r4)
+ std r6,8(r3)
+ stdu r7,16(r3)
+ bdnz+ 1b
+ blr
+
+/*
+ * I/O string operations
+ *
+ * insb(port, buf, len)
+ * outsb(port, buf, len)
+ * insw(port, buf, len)
+ * outsw(port, buf, len)
+ * insl(port, buf, len)
+ * outsl(port, buf, len)
+ * insw_ns(port, buf, len)
+ * outsw_ns(port, buf, len)
+ * insl_ns(port, buf, len)
+ * outsl_ns(port, buf, len)
+ *
+ * The *_ns versions don't do byte-swapping.
+ */
+/*
+ * Register conventions below: r3 = port address, r4 = buffer,
+ * r5 = count (loaded into CTR).  A count <= 0 exits via blelr-.
+ * Each transfer is followed/preceded by eieio to keep the I/O
+ * accesses ordered.  The swapping variants use the byte-reversed
+ * load/store forms (lhbrx/sthbrx, lwbrx/stwbrx).
+ */
+_GLOBAL(_insb)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,1
+ blelr-
+00: lbz r5,0(r3)
+ eieio
+ stbu r5,1(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsb)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,1
+ blelr-
+00: lbzu r5,1(r4)
+ stb r5,0(r3)
+ eieio
+ bdnz 00b
+ blr
+
+_GLOBAL(_insw)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhbrx r5,0,r3
+ eieio
+ sthu r5,2(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsw)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhzu r5,2(r4)
+ eieio
+ sthbrx r5,0,r3
+ bdnz 00b
+ blr
+
+_GLOBAL(_insl)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwbrx r5,0,r3
+ eieio
+ stwu r5,4(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsl)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwzu r5,4(r4)
+ stwbrx r5,0,r3
+ eieio
+ bdnz 00b
+ blr
+
+_GLOBAL(ide_insw)
+_GLOBAL(_insw_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhz r5,0(r3)
+ eieio
+ sthu r5,2(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(ide_outsw)
+_GLOBAL(_outsw_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhzu r5,2(r4)
+ sth r5,0(r3)
+ eieio
+ bdnz 00b
+ blr
+
+_GLOBAL(_insl_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwz r5,0(r3)
+ eieio
+ stwu r5,4(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsl_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwzu r5,4(r4)
+ stw r5,0(r3)
+ eieio
+ bdnz 00b
+ blr
+
+/*
+ * Extended precision shifts
+ *
+ * R3/R4 has 64 bit value
+ * R5 has shift count
+ * result in R3/R4
+ *
+ * ashrdi3: XXXYYY/ZZZAAA -> SSSXXX/YYYZZZ
+ * ashldi3: XXXYYY/ZZZAAA -> YYYZZZ/AAA000
+ * lshrdi3: XXXYYY/ZZZAAA -> 000XXX/YYYZZZ
+ *
+ * NOTE(review): these use the 32-bit shift forms (slw/srw/sraw) on a
+ * register pair, i.e. the 32-bit ABI's 64-bit-shift helpers —
+ * consistent with the MIKEC question below about whether gcc still
+ * emits calls to them on ppc64.
+ */
+/* MIKEC: These may no longer be needed...what does gcc expect ? */
+
+_GLOBAL(__ashrdi3)
+ li r6,32
+ sub r6,r6,r5
+ slw r7,r3,r6 /* isolate YYY */
+ srw r4,r4,r5 /* isolate ZZZ */
+ or r4,r4,r7 /* YYYZZZ */
+ sraw r3,r3,r5 /* SSSXXX */
+ blr
+
+_GLOBAL(__ashldi3)
+ li r6,32
+ sub r6,r6,r5
+ srw r7,r4,r6 /* isolate ZZZ */
+ slw r4,r4,r5 /* AAA000 */
+ slw r3,r3,r5 /* YYY--- */
+ or r3,r3,r7 /* YYYZZZ */
+ blr
+
+_GLOBAL(__lshrdi3)
+ li r6,32
+ sub r6,r6,r5
+ slw r7,r3,r6 /* isolate YYY */
+ srw r4,r4,r5 /* isolate ZZZ */
+ or r4,r4,r7 /* YYYZZZ */
+ srw r3,r3,r5 /* 000XXX */
+ blr
+
+/* int abs(int): negate r3 if it compares below zero, else pass it
+ * through unchanged. */
+_GLOBAL(abs)
+ cmpi 0,r3,0
+ bge 10f
+ neg r3,r3
+10: blr
+
+/* More trivial accessors: stack pointer and identification SPRs,
+ * each returned in r3. */
+_GLOBAL(_get_SP)
+ mr r3,r1 /* Close enough */
+ blr
+
+_GLOBAL(_get_PVR)
+ mfspr r3,PVR
+ blr
+
+_GLOBAL(_get_PIR)
+ mfspr r3,PIR
+ blr
+
+_GLOBAL(_get_HID0)
+ mfspr r3,HID0
+ blr
+
+/*
+ * Float conversions done with the caller's FPSCR image:
+ * cvt_fd(float *in, double *out, fpscr_image *r5): single -> double.
+ * The FPSCR image is loaded from -4(r5) first and the updated value
+ * is stored back there afterwards.
+ */
+_GLOBAL(cvt_fd)
+ lfd 0,-4(r5) /* load up fpscr value */
+ mtfsf 0xff,0
+ lfs 0,0(r3)
+ stfd 0,0(r4)
+ mffs 0 /* save new fpscr value */
+ stfd 0,-4(r5)
+ blr
+
+/* cvt_df: double -> single, same FPSCR handling as cvt_fd. */
+_GLOBAL(cvt_df)
+ lfd 0,-4(r5) /* load up fpscr value */
+ mtfsf 0xff,0
+ lfd 0,0(r3)
+ stfs 0,0(r4)
+ mffs 0 /* save new fpscr value */
+ stfd 0,-4(r5)
+ blr
+
+/*
+ * Create a kernel thread
+ * kernel_thread(fn, arg, flags)
+ *
+ * Issues the clone syscall directly (r0 = __NR_clone); the parent
+ * returns the syscall result, the child clears its user-register
+ * context, drops _TIF_32BIT, calls fn(arg) and then exits via the
+ * exit syscall.
+ */
+_GLOBAL(kernel_thread)
+ mr r6,r3 /* function */
+ ori r3,r5,CLONE_VM /* flags */
+ li r0,__NR_clone
+ sc
+ cmpi 0,r3,0 /* parent or child? */
+ bnelr /* return if parent */
+
+ li r0,0 /* clear out p->thread.regs */
+ std r0,THREAD+PT_REGS(r13) /* since we don't have user ctx */
+
+ clrrdi r5,r1,THREAD_SHIFT
+ ld r0,TI_FLAGS(r5)
+ li r7,_TIF_32BIT
+ andc r0,r0,r7 /* new thread is 64-bit */
+#ifdef CONFIG_PPC_ISERIES
+ ori r0,r0,_TIF_RUN_LIGHT /* Run light on */
+#endif
+ std r0,TI_FLAGS(r5)
+
+ /* r6 points at fn's descriptor: entry at 0(r6), TOC at 8(r6)
+  * (presumably the ELFv1 function-descriptor layout). */
+ ld r2,8(r6)
+ ld r6,0(r6)
+ mtlr r6 /* fn addr in lr */
+ mr r3,r4 /* load arg and call fn */
+ blrl
+ li r0,__NR_exit /* exit after child exits */
+ li r3,0
+ sc
+
+#ifdef CONFIG_BINFMT_ELF32
+/* Why isn't this a) automatic, b) written in 'C'? */
+/*
+ * System-call dispatch table for 32-bit (compat) processes.  Each
+ * slot holds the '.'-prefixed text entry point of the handler;
+ * sys32_* / ppc32_* wrappers translate 32-bit user arguments.  Slots
+ * past the last implemented call (208, tkill) are padded with
+ * sys_ni_syscall by the .rept below.
+ */
+ .data
+ .align 8
+_GLOBAL(sys_call_table32)
+ .llong .sys_ni_syscall /* 0 - old "setup()" system call */
+ .llong .sys32_exit
+ .llong .sys32_fork
+ .llong .sys_read
+ .llong .sys_write
+ .llong .sys32_open /* 5 */
+ .llong .sys_close
+ .llong .sys32_waitpid
+ .llong .sys32_creat
+ .llong .sys_link
+ .llong .sys_unlink /* 10 */
+ .llong .sys32_execve
+ .llong .sys_chdir
+ .llong .sys32_time
+ .llong .sys32_mknod
+ .llong .sys32_chmod /* 15 */
+ .llong .sys_lchown
+ .llong .sys_ni_syscall /* old break syscall holder */
+ .llong .sys32_stat
+ .llong .sys32_lseek
+ .llong .sys_getpid /* 20 */
+ .llong .sys32_mount
+ .llong .sys_oldumount
+ .llong .sys_setuid
+ .llong .sys_getuid
+ .llong .ppc64_sys_stime /* 25 */
+ .llong .sys32_ptrace
+ .llong .sys_alarm
+ .llong .sys32_fstat
+ .llong .sys32_pause
+ .llong .sys32_utime /* 30 */
+ .llong .sys_ni_syscall /* old stty syscall holder */
+ .llong .sys_ni_syscall /* old gtty syscall holder */
+ .llong .sys32_access
+ .llong .sys32_nice
+ .llong .sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .llong .sys_sync
+ .llong .sys32_kill
+ .llong .sys_rename
+ .llong .sys32_mkdir
+ .llong .sys_rmdir /* 40 */
+ .llong .sys_dup
+ .llong .sys_pipe
+ .llong .sys32_times
+ .llong .sys_ni_syscall /* old prof syscall holder */
+ .llong .sys_brk /* 45 */
+ .llong .sys_setgid
+ .llong .sys_getgid
+ .llong .sys_signal
+ .llong .sys_geteuid
+ .llong .sys_getegid /* 50 */
+ .llong .sys_acct
+ .llong .sys32_umount /* recycled never used phys() */
+ .llong .sys_ni_syscall /* old lock syscall holder */
+ .llong .sys32_ioctl
+ .llong .sys32_fcntl /* 55 */
+ .llong .sys_ni_syscall /* old mpx syscall holder */
+ .llong .sys32_setpgid
+ .llong .sys_ni_syscall /* old ulimit syscall holder */
+ .llong .sys_olduname
+ .llong .sys32_umask /* 60 */
+ .llong .sys_chroot
+ .llong .sys_ustat
+ .llong .sys_dup2
+ .llong .sys_getppid
+ .llong .sys_getpgrp /* 65 */
+ .llong .sys_setsid
+ .llong .sys32_sigaction
+ .llong .sys_sgetmask
+ .llong .sys32_ssetmask
+ .llong .sys_setreuid /* 70 */
+ .llong .sys_setregid
+ .llong .sys_sigsuspend
+ .llong .sys32_sigpending
+ .llong .sys32_sethostname
+ .llong .sys32_setrlimit /* 75 */
+ .llong .sys32_old_getrlimit
+ .llong .sys32_getrusage
+ .llong .sys32_gettimeofday
+ .llong .sys32_settimeofday
+ .llong .sys32_getgroups /* 80 */
+ .llong .sys32_setgroups
+ .llong .ppc32_select
+ .llong .sys_symlink
+ .llong .sys32_lstat
+ .llong .sys32_readlink /* 85 */
+ .llong .sys_uselib
+ .llong .sys32_swapon
+ .llong .sys32_reboot
+ .llong .old32_readdir
+ .llong .sys32_mmap /* 90 */
+ .llong .sys_munmap
+ .llong .sys_truncate
+ .llong .sys_ftruncate
+ .llong .sys_fchmod
+ .llong .sys_fchown /* 95 */
+ .llong .sys32_getpriority
+ .llong .sys32_setpriority
+ .llong .sys_ni_syscall /* old profil syscall holder */
+ .llong .sys32_statfs
+ .llong .sys32_fstatfs /* 100 */
+ .llong .sys32_ioperm
+ .llong .sys32_socketcall
+ .llong .sys32_syslog
+ .llong .sys32_setitimer
+ .llong .sys32_getitimer /* 105 */
+ .llong .sys32_newstat
+ .llong .sys32_newlstat
+ .llong .sys32_newfstat
+ .llong .sys_uname
+ .llong .sys32_iopl /* 110 */
+ .llong .sys_vhangup
+ .llong .sys_ni_syscall /* old 'idle' syscall */
+ .llong .sys32_vm86
+ .llong .sys32_wait4
+ .llong .sys_swapoff /* 115 */
+ .llong .sys32_sysinfo
+ .llong .sys32_ipc
+ .llong .sys_fsync
+ .llong .ppc32_sigreturn
+ .llong .sys32_clone /* 120 */
+ .llong .sys32_setdomainname
+ .llong .ppc64_newuname
+ .llong .sys32_modify_ldt
+ .llong .sys32_adjtimex
+ .llong .sys_mprotect /* 125 */
+ .llong .sys32_sigprocmask
+ .llong .sys32_create_module
+ .llong .sys32_init_module
+ .llong .sys32_delete_module
+ .llong .sys32_get_kernel_syms /* 130 */
+ .llong .sys32_quotactl
+ .llong .sys32_getpgid
+ .llong .sys_fchdir
+ .llong .sys32_bdflush
+ .llong .sys32_sysfs /* 135 */
+ .llong .sys32_personality
+ .llong .sys_ni_syscall /* for afs_syscall */
+ .llong .sys_setfsuid
+ .llong .sys_setfsgid
+ .llong .sys_llseek /* 140 */
+ .llong .sys32_getdents
+ .llong .ppc32_select
+ .llong .sys_flock
+ .llong .sys32_msync
+ .llong .sys32_readv /* 145 */
+ .llong .sys32_writev
+ .llong .sys32_getsid
+ .llong .sys_fdatasync
+ .llong .sys32_sysctl
+ .llong .sys_mlock /* 150 */
+ .llong .sys_munlock
+ .llong .sys32_mlockall
+ .llong .sys_munlockall
+ .llong .sys32_sched_setparam
+ .llong .sys32_sched_getparam /* 155 */
+ .llong .sys32_sched_setscheduler
+ .llong .sys32_sched_getscheduler
+ .llong .sys_sched_yield
+ .llong .sys32_sched_get_priority_max
+ .llong .sys32_sched_get_priority_min /* 160 */
+ .llong .sys32_sched_rr_get_interval
+ .llong .sys32_nanosleep
+ .llong .sys32_mremap
+ .llong .sys_setresuid
+ .llong .sys_getresuid /* 165 */
+ .llong .sys32_query_module
+ .llong .sys_poll
+ .llong .sys32_nfsservctl
+ .llong .sys_setresgid
+ .llong .sys_getresgid /* 170 */
+ .llong .sys32_prctl
+ .llong .ppc32_rt_sigreturn
+ .llong .sys32_rt_sigaction
+ .llong .sys32_rt_sigprocmask
+ .llong .sys32_rt_sigpending /* 175 */
+ .llong .sys32_rt_sigtimedwait
+ .llong .sys32_rt_sigqueueinfo
+ .llong .sys32_rt_sigsuspend
+ .llong .sys32_pread
+ .llong .sys32_pwrite /* 180 */
+ .llong .sys_chown
+ .llong .sys_getcwd
+ .llong .sys_capget
+ .llong .sys_capset
+ .llong .sys32_sigaltstack /* 185 */
+ .llong .sys32_sendfile
+ .llong .sys_ni_syscall /* streams1 */
+ .llong .sys_ni_syscall /* streams2 */
+ .llong .sys32_vfork
+ .llong .sys32_getrlimit /* 190 */
+ .llong .sys_ni_syscall /* 191 */ /* Unused */
+ .llong .sys_ni_syscall /* 192 - reserved - mmap2 */
+ .llong .sys32_truncate64 /* 193 - truncate64 */
+ .llong .sys32_ftruncate64 /* 194 - ftruncate64 */
+ .llong .sys_stat64 /* 195 - stat64 */
+ .llong .sys_lstat64 /* 196 - lstat64 */
+ .llong .sys_fstat64 /* 197 - fstat64 */
+ .llong .sys32_pciconfig_read /* 198 */
+ .llong .sys32_pciconfig_write /* 199 */
+ .llong .sys_ni_syscall /* 200 - reserved - sys_pciconfig_iobase */
+ .llong .sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
+ .llong .sys_getdents64 /* 202 */
+ .llong .sys_pivot_root /* 203 */
+ .llong .sys32_fcntl64 /* 204 */
+ .llong .sys_madvise /* 205 */
+ .llong .sys_mincore /* 206 */
+ .llong .sys_gettid /* 207 */
+ .llong .sys_tkill /* 208 */
+ .rept NR_syscalls-208
+ .llong .sys_ni_syscall
+ .endr
+#endif
+ .data
+ .align 8
+_GLOBAL(sys_call_table)
+ .llong .sys_ni_syscall /* 0 - old "setup()" system call */
+ .llong .sys_exit
+ .llong .sys_fork
+ .llong .sys_read
+ .llong .sys_write
+ .llong .sys_open /* 5 */
+ .llong .sys_close
+ .llong .sys_waitpid
+ .llong .sys_creat
+ .llong .sys_link
+ .llong .sys_unlink /* 10 */
+ .llong .sys_execve
+ .llong .sys_chdir
+ .llong .sys64_time
+ .llong .sys_mknod
+ .llong .sys_chmod /* 15 */
+ .llong .sys_lchown
+ .llong .sys_ni_syscall /* old break syscall holder */
+ .llong .sys_stat
+ .llong .sys_lseek
+ .llong .sys_getpid /* 20 */
+ .llong .sys_mount
+ .llong .sys_oldumount
+ .llong .sys_setuid
+ .llong .sys_getuid
+ .llong .ppc64_sys_stime /* 25 */
+ .llong .sys_ptrace
+ .llong .sys_alarm
+ .llong .sys_fstat
+ .llong .sys_pause
+ .llong .sys_utime /* 30 */
+ .llong .sys_ni_syscall /* old stty syscall holder */
+ .llong .sys_ni_syscall /* old gtty syscall holder */
+ .llong .sys_access
+ .llong .sys_nice
+ .llong .sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .llong .sys_sync
+ .llong .sys_kill
+ .llong .sys_rename
+ .llong .sys_mkdir
+ .llong .sys_rmdir /* 40 */
+ .llong .sys_dup
+ .llong .sys_pipe
+ .llong .sys_times
+ .llong .sys_ni_syscall /* old prof syscall holder */
+ .llong .sys_brk /* 45 */
+ .llong .sys_setgid
+ .llong .sys_getgid
+ .llong .sys_signal
+ .llong .sys_geteuid
+ .llong .sys_getegid /* 50 */
+ .llong .sys_acct
+ .llong .sys_umount /* recycled never used phys() */
+ .llong .sys_ni_syscall /* old lock syscall holder */
+ .llong .sys_ioctl
+ .llong .sys_fcntl /* 55 */
+ .llong .sys_ni_syscall /* old mpx syscall holder */
+ .llong .sys_setpgid
+ .llong .sys_ni_syscall /* old ulimit syscall holder */
+ .llong .sys_olduname
+ .llong .sys_umask /* 60 */
+ .llong .sys_chroot
+ .llong .sys_ustat
+ .llong .sys_dup2
+ .llong .sys_getppid
+ .llong .sys_getpgrp /* 65 */
+ .llong .sys_setsid
+ .llong .sys_sigaction
+ .llong .sys_sgetmask
+ .llong .sys_ssetmask
+ .llong .sys_setreuid /* 70 */
+ .llong .sys_setregid
+ .llong .sys_sigsuspend
+ .llong .sys_sigpending
+ .llong .sys_sethostname
+ .llong .sys_setrlimit /* 75 */
+ .llong .sys_old_getrlimit
+ .llong .sys_getrusage
+ .llong .sys_gettimeofday
+ .llong .sys_settimeofday
+ .llong .sys_getgroups /* 80 */
+ .llong .sys_setgroups
+ .llong .sys_select
+ .llong .sys_symlink
+ .llong .sys_lstat
+ .llong .sys_readlink /* 85 */
+ .llong .sys_uselib
+ .llong .sys_swapon
+ .llong .sys_reboot
+ .llong .old_readdir
+ .llong .sys_mmap /* 90 */
+ .llong .sys_munmap
+ .llong .sys_truncate
+ .llong .sys_ftruncate
+ .llong .sys_fchmod
+ .llong .sys_fchown /* 95 */
+ .llong .sys_getpriority
+ .llong .sys_setpriority
+ .llong .sys_ni_syscall /* old profil syscall holder */
+ .llong .sys_statfs
+ .llong .sys_fstatfs /* 100 */
+ .llong .sys_ioperm
+ .llong .sys_socketcall
+ .llong .sys_syslog
+ .llong .sys_setitimer
+ .llong .sys_getitimer /* 105 */
+ .llong .sys_newstat
+ .llong .sys_newlstat
+ .llong .sys_newfstat
+ .llong .sys_uname
+ .llong .sys_iopl /* 110 */
+ .llong .sys_vhangup
+ .llong .sys_ni_syscall /* old 'idle' syscall */
+ .llong .sys_vm86
+ .llong .sys_wait4
+ .llong .sys_swapoff /* 115 */
+ .llong .sys_sysinfo
+ .llong .sys_ipc
+ .llong .sys_fsync
+ .llong .ppc64_sigreturn
+ .llong .sys_clone /* 120 */
+ .llong .sys_setdomainname
+ .llong .ppc64_newuname
+ .llong .sys_modify_ldt
+ .llong .sys_adjtimex
+ .llong .sys_mprotect /* 125 */
+ .llong .sys_sigprocmask
+ .llong .sys_create_module
+ .llong .sys_init_module
+ .llong .sys_delete_module
+ .llong .sys_get_kernel_syms /* 130 */
+ .llong .sys_quotactl
+ .llong .sys_getpgid
+ .llong .sys_fchdir
+ .llong .sys_bdflush
+ .llong .sys_sysfs /* 135 */
+ .llong .sys_personality
+ .llong .sys_ni_syscall /* for afs_syscall */
+ .llong .sys_setfsuid
+ .llong .sys_setfsgid
+ .llong .sys_llseek /* 140 */
+ .llong .sys_getdents
+ .llong .sys_select
+ .llong .sys_flock
+ .llong .sys_msync
+ .llong .sys_readv /* 145 */
+ .llong .sys_writev
+ .llong .sys_getsid
+ .llong .sys_fdatasync
+ .llong .sys_sysctl
+ .llong .sys_mlock /* 150 */
+ .llong .sys_munlock
+ .llong .sys_mlockall
+ .llong .sys_munlockall
+ .llong .sys_sched_setparam
+ .llong .sys_sched_getparam /* 155 */
+ .llong .sys_sched_setscheduler
+ .llong .sys_sched_getscheduler
+ .llong .sys_sched_yield
+ .llong .sys_sched_get_priority_max
+ .llong .sys_sched_get_priority_min /* 160 */
+ .llong .sys_sched_rr_get_interval
+ .llong .sys_nanosleep
+ .llong .sys_mremap
+ .llong .sys_setresuid
+ .llong .sys_getresuid /* 165 */
+ .llong .sys_query_module
+ .llong .sys_poll
+ .llong .sys_nfsservctl
+ .llong .sys_setresgid
+ .llong .sys_getresgid /* 170 */
+ .llong .sys_prctl
+ .llong .ppc64_rt_sigreturn
+ .llong .sys_rt_sigaction
+ .llong .sys_rt_sigprocmask
+ .llong .sys_rt_sigpending /* 175 */
+ .llong .sys_rt_sigtimedwait
+ .llong .sys_rt_sigqueueinfo
+ .llong .sys_rt_sigsuspend
+ .llong .sys_pread
+ .llong .sys_pwrite /* 180 */
+ .llong .sys_chown
+ .llong .sys_getcwd
+ .llong .sys_capget
+ .llong .sys_capset
+ .llong .sys_sigaltstack /* 185 */
+ .llong .sys_sendfile
+ .llong .sys_ni_syscall /* streams1 */
+ .llong .sys_ni_syscall /* streams2 */
+ .llong .sys_vfork
+ .llong .sys_getrlimit /* 190 */
+ .llong .sys_ni_syscall /* 191 */ /* Unused */
+ .llong .sys_ni_syscall /* 192 - reserved - mmap2 */
+ .llong .sys_ni_syscall /* 193 - reserved - truncate64 */
+ .llong .sys_ni_syscall /* 194 - reserved - ftruncate64 */
+ .llong .sys_ni_syscall /* 195 - reserved - stat64 */
+ .llong .sys_ni_syscall /* 196 - reserved - lstat64 */
+ .llong .sys_ni_syscall /* 197 - reserved - fstat64 */
+ .llong .sys_pciconfig_read /* 198 */
+ .llong .sys_pciconfig_write /* 199 */
+ .llong .sys_ni_syscall /* 200 - reserved - sys_pciconfig_iobase */
+ .llong .sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
+ .llong .sys_getdents64 /* 202 */
+ .llong .sys_pivot_root /* 203 */
+ .llong .sys_ni_syscall /* 204 */
+ .llong .sys_madvise /* 205 */
+ .llong .sys_mincore /* 206 */
+ .llong .sys_gettid /* 207 */
+ .llong .sys_tkill /* 208 */
+ .rept NR_syscalls-208
+ .llong .sys_ni_syscall
+ .endr
--- /dev/null
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stddef.h>
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+
+#include <asm/Naca.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n#define\t" #sym "\t%0" : : "i" (val))
+
+int
+main(void)
+{
+ /* thread struct on stack */
+ DEFINE(THREAD_SHIFT, THREAD_SHIFT);
+ DEFINE(THREAD_SIZE, THREAD_SIZE);
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(_TIF_32BIT, _TIF_32BIT);
+
+ /* task_struct->thread */
+ DEFINE(THREAD, offsetof(struct task_struct, thread));
+ DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
+ DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
+ DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
+ DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+ DEFINE(KSP, offsetof(struct thread_struct, ksp));
+
+ DEFINE(MM, offsetof(struct task_struct, mm));
+
+ /* Naca */
+ DEFINE(DCACHEL1LINESIZE, offsetof(struct Naca, dCacheL1LineSize));
+ DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct Naca, dCacheL1LogLineSize));
+ DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct Naca, dCacheL1LinesPerPage));
+ DEFINE(ICACHEL1LINESIZE, offsetof(struct Naca, iCacheL1LineSize));
+ DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct Naca, iCacheL1LogLineSize));
+ DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct Naca, iCacheL1LinesPerPage));
+ DEFINE(SLBSIZE, offsetof(struct Naca, slb_size));
+
+ /* Paca */
+ DEFINE(PACA, offsetof(struct Naca, paca));
+ DEFINE(PACA_SIZE, sizeof(struct Paca));
+ DEFINE(PACAPACAINDEX, offsetof(struct Paca, xPacaIndex));
+ DEFINE(PACAPROCSTART, offsetof(struct Paca, xProcStart));
+ DEFINE(PACAKSAVE, offsetof(struct Paca, xKsave));
+ DEFINE(PACACURRENT, offsetof(struct Paca, xCurrent));
+ DEFINE(PACASAVEDMSR, offsetof(struct Paca, xSavedMsr));
+ DEFINE(PACASTABREAL, offsetof(struct Paca, xStab_data.real));
+ DEFINE(PACASTABVIRT, offsetof(struct Paca, xStab_data.virt));
+ DEFINE(PACASTABRR, offsetof(struct Paca, xStab_data.next_round_robin));
+ DEFINE(PACAR1, offsetof(struct Paca, xR1));
+ DEFINE(PACALPQUEUE, offsetof(struct Paca, lpQueuePtr));
+ DEFINE(PACATOC, offsetof(struct Paca, xTOC));
+ DEFINE(PACAEXCSP, offsetof(struct Paca, exception_sp));
+ DEFINE(PACAHRDWINTSTACK, offsetof(struct Paca, xHrdIntStack));
+ DEFINE(PACAPROCENABLED, offsetof(struct Paca, xProcEnabled));
+ DEFINE(PACAHRDWINTCOUNT, offsetof(struct Paca, xHrdIntCount));
+ DEFINE(PACADEFAULTDECR, offsetof(struct Paca, default_decr));
+ DEFINE(PACAPROFENABLED, offsetof(struct Paca, prof_enabled));
+ DEFINE(PACAPROFLEN, offsetof(struct Paca, prof_len));
+ DEFINE(PACAPROFSHIFT, offsetof(struct Paca, prof_shift));
+ DEFINE(PACAPROFBUFFER, offsetof(struct Paca, prof_buffer));
+ DEFINE(PACAPROFSTEXT, offsetof(struct Paca, prof_stext));
+ DEFINE(PACALPPACA, offsetof(struct Paca, xLpPaca));
+ DEFINE(LPPACA, offsetof(struct Paca, xLpPaca));
+ DEFINE(PACAREGSAV, offsetof(struct Paca, xRegSav));
+ DEFINE(PACAEXC, offsetof(struct Paca, exception_stack));
+ DEFINE(PACAGUARD, offsetof(struct Paca, guard));
+ DEFINE(LPPACASRR0, offsetof(struct ItLpPaca, xSavedSrr0));
+ DEFINE(LPPACASRR1, offsetof(struct ItLpPaca, xSavedSrr1));
+ DEFINE(LPPACAANYINT, offsetof(struct ItLpPaca, xIntDword.xAnyInt));
+ DEFINE(LPPACADECRINT, offsetof(struct ItLpPaca, xIntDword.xFields.xDecrInt));
+ DEFINE(LPQCUREVENTPTR, offsetof(struct ItLpQueue, xSlicCurEventPtr));
+ DEFINE(LPQOVERFLOW, offsetof(struct ItLpQueue, xPlicOverflowIntPending));
+ DEFINE(LPEVENTFLAGS, offsetof(struct HvLpEvent, xFlags));
+ DEFINE(PROMENTRY, offsetof(struct prom_t, entry));
+
+ /* RTAS */
+ DEFINE(RTASBASE, offsetof(struct rtas_t, base));
+ DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
+ DEFINE(RTASSIZE, offsetof(struct rtas_t, size));
+
+ /* Interrupt register frame */
+ DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+
+ /* 288 = # of volatile regs, int & fp, for leaf routines */
+ /* which do not stack a frame. See the PPC64 ABI. */
+ DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
+ /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
+ DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16 + 288);
+ DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16 + 288);
+ DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
+ DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
+ DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
+ DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
+ DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
+ DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
+ DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
+ DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
+ DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
+ DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
+ DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
+ DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
+ DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
+ DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
+ /*
+ * Note: these symbols include _ because they overlap with special
+ * register names
+ */
+ DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
+ DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
+ DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
+ DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
+ DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
+ DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
+ DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
+ DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
+ DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
+ DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
+ DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
+ DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
+
+ /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
+ DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
+ DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
+
+ DEFINE(CLONE_VM, CLONE_VM);
+
+ return 0;
+}
--- /dev/null
+/*
+ * arch/ppc/kernel/open_pic.c -- OpenPIC Interrupt Handling
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <asm/ptrace.h>
+#include <asm/signal.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+
+#include <asm/machdep.h>
+
+#include "local_irq.h"
+#include "open_pic.h"
+#include "open_pic_defs.h"
+#include "i8259.h"
+#include <asm/ppcdebug.h>
+
+void* OpenPIC_Addr;
+static volatile struct OpenPIC *OpenPIC = NULL;
+u_int OpenPIC_NumInitSenses __initdata = 0;
+u_char *OpenPIC_InitSenses __initdata = NULL;
+extern int use_of_interrupt_tree;
+
+void find_ISUs(void);
+
+static u_int NumProcessors;
+static u_int NumSources;
+static int NumISUs;
+static int open_pic_irq_offset;
+static volatile unsigned char* chrp_int_ack_special;
+static int broken_ipi_registers;
+
+OpenPIC_SourcePtr ISU[OPENPIC_MAX_ISU];
+
+static void openpic_end_irq(unsigned int irq_nr);
+static void openpic_ack_irq(unsigned int irq_nr);
+static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask);
+
+struct hw_interrupt_type open_pic = {
+ " OpenPIC ",
+ NULL,
+ NULL,
+ openpic_enable_irq,
+ openpic_disable_irq,
+ openpic_ack_irq,
+ openpic_end_irq,
+ openpic_set_affinity
+};
+
+#ifdef CONFIG_SMP
+static void openpic_end_ipi(unsigned int irq_nr);
+static void openpic_ack_ipi(unsigned int irq_nr);
+static void openpic_enable_ipi(unsigned int irq_nr);
+static void openpic_disable_ipi(unsigned int irq_nr);
+
+struct hw_interrupt_type open_pic_ipi = {
+ " OpenPIC ",
+ NULL,
+ NULL,
+ openpic_enable_ipi,
+ openpic_disable_ipi,
+ openpic_ack_ipi,
+ openpic_end_ipi,
+ 0
+};
+#endif /* CONFIG_SMP */
+
+unsigned int openpic_vec_ipi;
+unsigned int openpic_vec_timer;
+unsigned int openpic_vec_spurious;
+
+/*
+ * Accesses to the current processor's openpic registers
+ */
+#ifdef CONFIG_SMP
+#define THIS_CPU Processor[cpu]
+#define DECL_THIS_CPU int cpu = hard_smp_processor_id()
+#define CHECK_THIS_CPU check_arg_cpu(cpu)
+#else
+#define THIS_CPU Processor[hard_smp_processor_id()]
+#define DECL_THIS_CPU
+#define CHECK_THIS_CPU
+#endif /* CONFIG_SMP */
+
+#if 0
+#define check_arg_ipi(ipi) \
+ if (ipi < 0 || ipi >= OPENPIC_NUM_IPI) \
+ printk(KERN_ERR "open_pic.c:%d: illegal ipi %d\n", __LINE__, ipi);
+#define check_arg_timer(timer) \
+ if (timer < 0 || timer >= OPENPIC_NUM_TIMERS) \
+ printk(KERN_ERR "open_pic.c:%d: illegal timer %d\n", __LINE__, timer);
+#define check_arg_vec(vec) \
+ if (vec < 0 || vec >= OPENPIC_NUM_VECTORS) \
+ printk(KERN_ERR "open_pic.c:%d: illegal vector %d\n", __LINE__, vec);
+#define check_arg_pri(pri) \
+ if (pri < 0 || pri >= OPENPIC_NUM_PRI) \
+ printk(KERN_ERR "open_pic.c:%d: illegal priority %d\n", __LINE__, pri);
+/*
+ * Print out a backtrace if it's out of range, since if it's larger than NR_IRQ's
+ * data has probably been corrupted and we're going to panic or deadlock later
+ * anyway --Troy
+ */
+extern unsigned long* _get_SP(void);
+#define check_arg_irq(irq) \
+ if (irq < open_pic_irq_offset || irq >= (NumSources+open_pic_irq_offset)){ \
+ printk(KERN_ERR "open_pic.c:%d: illegal irq %d\n", __LINE__, irq); \
+ print_backtrace(_get_SP()); }
+#define check_arg_cpu(cpu) \
+ if (cpu < 0 || cpu >= OPENPIC_MAX_PROCESSORS){ \
+ printk(KERN_ERR "open_pic.c:%d: illegal cpu %d\n", __LINE__, cpu); \
+ print_backtrace(_get_SP()); }
+#else
+#define check_arg_ipi(ipi) do {} while (0)
+#define check_arg_timer(timer) do {} while (0)
+#define check_arg_vec(vec) do {} while (0)
+#define check_arg_pri(pri) do {} while (0)
+#define check_arg_irq(irq) do {} while (0)
+#define check_arg_cpu(cpu) do {} while (0)
+#endif
+
+#define GET_ISU(source) ISU[(source) >> 4][(source) & 0xf]
+
+void __init openpic_init_IRQ(void)
+{
+ struct device_node *np;
+ int i;
+ unsigned int *addrp;
+ unsigned char* chrp_int_ack_special = 0;
+ unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
+ int nmi_irq = -1;
+#if defined(CONFIG_VT) && defined(CONFIG_ADB_KEYBOARD) && defined(XMON)
+ struct device_node *kbd;
+#endif
+
+ if (!(np = find_devices("pci"))
+ || !(addrp = (unsigned int *)
+ get_property(np, "8259-interrupt-acknowledge", NULL)))
+ printk(KERN_ERR "Cannot find pci to get ack address\n");
+ else
+ chrp_int_ack_special = (unsigned char *)
+ __ioremap(addrp[prom_n_addr_cells(np)-1], 1, _PAGE_NO_CACHE);
+ /* hydra still sets OpenPIC_InitSenses to a static set of values */
+ if (OpenPIC_InitSenses == NULL) {
+ prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS);
+ OpenPIC_InitSenses = init_senses;
+ OpenPIC_NumInitSenses = NR_IRQS - NUM_8259_INTERRUPTS;
+ }
+ openpic_init(1, NUM_8259_INTERRUPTS, chrp_int_ack_special, nmi_irq);
+ for ( i = 0 ; i < NUM_8259_INTERRUPTS ; i++ )
+ irq_desc[i].handler = &i8259_pic;
+ i8259_init();
+}
+
+static inline u_int openpic_read(volatile u_int *addr)
+{
+ u_int val;
+
+ val = in_le32(addr);
+ return val;
+}
+
+static inline void openpic_write(volatile u_int *addr, u_int val)
+{
+ out_le32(addr, val);
+}
+
+static inline u_int openpic_readfield(volatile u_int *addr, u_int mask)
+{
+ u_int val = openpic_read(addr);
+ return val & mask;
+}
+
+static inline void openpic_writefield(volatile u_int *addr, u_int mask,
+ u_int field)
+{
+ u_int val = openpic_read(addr);
+ openpic_write(addr, (val & ~mask) | (field & mask));
+}
+
+static inline void openpic_clearfield(volatile u_int *addr, u_int mask)
+{
+ openpic_writefield(addr, mask, 0);
+}
+
+static inline void openpic_setfield(volatile u_int *addr, u_int mask)
+{
+ openpic_writefield(addr, mask, mask);
+}
+
+static void openpic_safe_writefield(volatile u_int *addr, u_int mask,
+ u_int field)
+{
+ unsigned int loops = 100000;
+
+ openpic_setfield(addr, OPENPIC_MASK);
+ while (openpic_read(addr) & OPENPIC_ACTIVITY) {
+ if (!loops--) {
+ printk(KERN_ERR "openpic_safe_writefield timeout\n");
+ break;
+ }
+ }
+ openpic_writefield(addr, mask | OPENPIC_MASK, field | OPENPIC_MASK);
+}
+
+#ifdef CONFIG_SMP
+static u_int openpic_read_IPI(volatile u_int* addr)
+{
+ u_int val = 0;
+
+ if (broken_ipi_registers)
+ /* yes this is right ... bug, feature, you decide! -- tgall */
+ val = in_be32(addr);
+ else
+ val = in_le32(addr);
+
+ return val;
+}
+
+static void openpic_test_broken_IPI(void)
+{
+ u_int t;
+
+ openpic_write(&OpenPIC->Global.IPI_Vector_Priority(0), OPENPIC_MASK);
+ t = openpic_read(&OpenPIC->Global.IPI_Vector_Priority(0));
+ if (t == le32_to_cpu(OPENPIC_MASK)) {
+ printk(KERN_INFO "OpenPIC reversed IPI registers detected\n");
+ broken_ipi_registers = 1;
+ }
+}
+
+/* because of the power3 be / le above, this is needed */
+static inline void openpic_writefield_IPI(volatile u_int* addr, u_int mask, u_int field)
+{
+ u_int val = openpic_read_IPI(addr);
+ openpic_write(addr, (val & ~mask) | (field & mask));
+}
+
+static inline void openpic_clearfield_IPI(volatile u_int *addr, u_int mask)
+{
+ openpic_writefield_IPI(addr, mask, 0);
+}
+
+static inline void openpic_setfield_IPI(volatile u_int *addr, u_int mask)
+{
+ openpic_writefield_IPI(addr, mask, mask);
+}
+
+static void openpic_safe_writefield_IPI(volatile u_int *addr, u_int mask, u_int field)
+{
+ unsigned int loops = 100000;
+
+ openpic_setfield_IPI(addr, OPENPIC_MASK);
+
+ /* wait until it's not in use */
+ /* BenH: Is this code really enough ? I would rather check the result
+ * and eventually retry ...
+ */
+ while(openpic_read_IPI(addr) & OPENPIC_ACTIVITY) {
+ if (!loops--) {
+ printk(KERN_ERR "openpic_safe_writefield timeout\n");
+ break;
+ }
+ }
+
+ openpic_writefield_IPI(addr, mask, field | OPENPIC_MASK);
+}
+#endif /* CONFIG_SMP */
+
+void __init openpic_init(int main_pic, int offset, unsigned char* chrp_ack,
+ int programmer_switch_irq)
+{
+ u_int t, i;
+ u_int timerfreq;
+ const char *version;
+
+ if (!OpenPIC_Addr) {
+ printk(KERN_INFO "No OpenPIC found !\n");
+ return;
+ }
+ OpenPIC = (volatile struct OpenPIC *)OpenPIC_Addr;
+
+ ppc_md.progress("openpic enter",0x122);
+
+ t = openpic_read(&OpenPIC->Global.Feature_Reporting0);
+ switch (t & OPENPIC_FEATURE_VERSION_MASK) {
+ case 1:
+ version = "1.0";
+ break;
+ case 2:
+ version = "1.2";
+ break;
+ case 3:
+ version = "1.3";
+ break;
+ default:
+ version = "?";
+ break;
+ }
+ NumProcessors = ((t & OPENPIC_FEATURE_LAST_PROCESSOR_MASK) >>
+ OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1;
+ NumSources = ((t & OPENPIC_FEATURE_LAST_SOURCE_MASK) >>
+ OPENPIC_FEATURE_LAST_SOURCE_SHIFT) + 1;
+ printk(KERN_INFO "OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n",
+ version, NumProcessors, NumSources, OpenPIC);
+ timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency);
+ if (timerfreq)
+ printk(KERN_INFO "OpenPIC timer frequency is %d.%06d MHz\n",
+ timerfreq / 1000000, timerfreq % 1000000);
+
+ if (!main_pic)
+ return;
+
+ open_pic_irq_offset = offset;
+ chrp_int_ack_special = (volatile unsigned char*)chrp_ack;
+
+ find_ISUs();
+
+ /* Initialize timer interrupts */
+ ppc_md.progress("openpic timer",0x3ba);
+ for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
+ /* Disabled, Priority 0 */
+ openpic_inittimer(i, 0, openpic_vec_timer+i);
+ /* No processor */
+ openpic_maptimer(i, 0);
+ }
+
+#ifdef CONFIG_SMP
+ /* Initialize IPI interrupts */
+ ppc_md.progress("openpic ipi",0x3bb);
+ openpic_test_broken_IPI();
+ for (i = 0; i < OPENPIC_NUM_IPI; i++) {
+ /* Disabled, Priority 10..13 */
+ openpic_initipi(i, 10+i, openpic_vec_ipi+i);
+ /* IPIs are per-CPU */
+ irq_desc[openpic_vec_ipi+i].status |= IRQ_PER_CPU;
+ irq_desc[openpic_vec_ipi+i].handler = &open_pic_ipi;
+ }
+#endif
+
+ /* Initialize external interrupts */
+ ppc_md.progress("openpic ext",0x3bc);
+
+ openpic_set_priority(0xf);
+
+ /* SIOint (8259 cascade) is special */
+ if (offset) {
+ openpic_initirq(0, 8, offset, 1, 1);
+ openpic_mapirq(0, 1<<get_hard_smp_processor_id(0));
+ }
+
+ /* Init all external sources */
+ for (i = 1; i < NumSources; i++) {
+ int pri, sense;
+
+ /* the bootloader may have left it enabled (bad !) */
+ openpic_disable_irq(i+offset);
+
+ pri = (i == programmer_switch_irq)? 9: 8;
+ sense = (i < OpenPIC_NumInitSenses)? OpenPIC_InitSenses[i]: 1;
+ if (sense)
+ irq_desc[i+offset].status = IRQ_LEVEL;
+
+ /* Enabled, Priority 8 or 9 */
+ openpic_initirq(i, pri, i+offset, !sense, sense);
+ /* Processor 0 */
+ openpic_mapirq(i, 1<<get_hard_smp_processor_id(0));
+ }
+
+ /* Init descriptors */
+ for (i = offset; i < NumSources + offset; i++)
+ irq_desc[i].handler = &open_pic;
+
+ /* Initialize the spurious interrupt */
+ ppc_md.progress("openpic spurious",0x3bd);
+ openpic_set_spurious(openpic_vec_spurious);
+
+ /* Initialize the cascade */
+ if (offset) {
+ if (request_irq(offset, no_action, SA_INTERRUPT,
+ "82c59 cascade", NULL))
+ printk(KERN_ERR "Unable to get OpenPIC IRQ 0 for cascade\n");
+ }
+ openpic_set_priority(0);
+ openpic_disable_8259_pass_through();
+
+ ppc_md.progress("openpic exit",0x222);
+}
+
+void openpic_setup_ISU(int isu_num, unsigned long addr)
+{
+ if (isu_num >= OPENPIC_MAX_ISU)
+ return;
+ ISU[isu_num] = (OpenPIC_SourcePtr) __ioremap(addr, 0x400, _PAGE_NO_CACHE);
+ if (isu_num >= NumISUs)
+ NumISUs = isu_num + 1;
+}
+
+void find_ISUs(void)
+{
+ /* Use /interrupt-controller/reg and
+ * /interrupt-controller/interrupt-ranges from OF device tree
+ * the ISU array is setup in chrp_pci.c in ibm_add_bridges
+ * as a result
+ * -- tgall
+ */
+
+ /* basically each ISU is a bus, and this assumes that
+ * open_pic_isu_count interrupts per bus are possible
+ * ISU == Interrupt Source
+ */
+ NumSources = NumISUs * 0x10;
+ openpic_vec_ipi = NumSources + open_pic_irq_offset;
+ openpic_vec_timer = openpic_vec_ipi + OPENPIC_NUM_IPI;
+ openpic_vec_spurious = openpic_vec_timer + OPENPIC_NUM_TIMERS;
+}
+
+static inline void openpic_reset(void)
+{
+ openpic_setfield(&OpenPIC->Global.Global_Configuration0,
+ OPENPIC_CONFIG_RESET);
+}
+
+static inline void openpic_enable_8259_pass_through(void)
+{
+ openpic_clearfield(&OpenPIC->Global.Global_Configuration0,
+ OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE);
+}
+
+static void openpic_disable_8259_pass_through(void)
+{
+ openpic_setfield(&OpenPIC->Global.Global_Configuration0,
+ OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE);
+}
+
+/*
+ * Find out the current interrupt
+ */
+static u_int openpic_irq(void)
+{
+ u_int vec;
+ DECL_THIS_CPU;
+
+ CHECK_THIS_CPU;
+ vec = openpic_readfield(&OpenPIC->THIS_CPU.Interrupt_Acknowledge,
+ OPENPIC_VECTOR_MASK);
+ return vec;
+}
+
+static void openpic_eoi(void)
+{
+ DECL_THIS_CPU;
+
+ CHECK_THIS_CPU;
+ openpic_write(&OpenPIC->THIS_CPU.EOI, 0);
+ /* Handle PCI write posting */
+ (void)openpic_read(&OpenPIC->THIS_CPU.EOI);
+}
+
+
+static inline u_int openpic_get_priority(void)
+{
+ DECL_THIS_CPU;
+
+ CHECK_THIS_CPU;
+ return openpic_readfield(&OpenPIC->THIS_CPU.Current_Task_Priority,
+ OPENPIC_CURRENT_TASK_PRIORITY_MASK);
+}
+
+static void openpic_set_priority(u_int pri)
+{
+ DECL_THIS_CPU;
+
+ CHECK_THIS_CPU;
+ check_arg_pri(pri);
+ openpic_writefield(&OpenPIC->THIS_CPU.Current_Task_Priority,
+ OPENPIC_CURRENT_TASK_PRIORITY_MASK, pri);
+}
+
+/*
+ * Get/set the spurious vector
+ */
+static inline u_int openpic_get_spurious(void)
+{
+ return openpic_readfield(&OpenPIC->Global.Spurious_Vector,
+ OPENPIC_VECTOR_MASK);
+}
+
+static void openpic_set_spurious(u_int vec)
+{
+ check_arg_vec(vec);
+ openpic_writefield(&OpenPIC->Global.Spurious_Vector, OPENPIC_VECTOR_MASK,
+ vec);
+}
+
+/*
+ * Convert a cpu mask from logical to physical cpu numbers.
+ */
+static inline u32 physmask(u32 cpumask)
+{
+ int i;
+ u32 mask = 0;
+
+ for (i = 0; i < smp_num_cpus; ++i, cpumask >>= 1)
+ mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
+ return mask;
+}
+
+void openpic_init_processor(u_int cpumask)
+{
+ openpic_write(&OpenPIC->Global.Processor_Initialization,
+ physmask(cpumask));
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Initialize an interprocessor interrupt (and disable it)
+ *
+ * ipi: OpenPIC interprocessor interrupt number
+ * pri: interrupt source priority
+ * vec: the vector it will produce
+ */
+static void __init openpic_initipi(u_int ipi, u_int pri, u_int vec)
+{
+ check_arg_ipi(ipi);
+ check_arg_pri(pri);
+ check_arg_vec(vec);
+ openpic_safe_writefield_IPI(&OpenPIC->Global.IPI_Vector_Priority(ipi),
+ OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK,
+ (pri << OPENPIC_PRIORITY_SHIFT) | vec);
+}
+
+/*
+ * Send an IPI to one or more CPUs
+ *
+ * Externally called, however, it takes an IPI number (0...OPENPIC_NUM_IPI)
+ * and not a system-wide interrupt number
+ */
+void openpic_cause_IPI(u_int ipi, u_int cpumask)
+{
+ DECL_THIS_CPU;
+
+ CHECK_THIS_CPU;
+ check_arg_ipi(ipi);
+ openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
+ physmask(cpumask));
+}
+
+void openpic_request_IPIs(void)
+{
+ int i;
+
+ /*
+ * Make sure this matches what is defined in smp.c for
+ * smp_message_{pass|recv}() or what shows up in
+ * /proc/interrupts will be wrong!!! --Troy */
+
+ if (OpenPIC == NULL)
+ return;
+
+ request_irq(openpic_vec_ipi,
+ openpic_ipi_action, 0, "IPI0 (call function)", 0);
+ request_irq(openpic_vec_ipi+1,
+ openpic_ipi_action, 0, "IPI1 (reschedule)", 0);
+ request_irq(openpic_vec_ipi+2,
+ openpic_ipi_action, 0, "IPI2 (invalidate tlb)", 0);
+ request_irq(openpic_vec_ipi+3,
+ openpic_ipi_action, 0, "IPI3 (xmon break)", 0);
+
+ for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
+ openpic_enable_ipi(openpic_vec_ipi+i);
+}
+
+/*
+ * Do per-cpu setup for SMP systems.
+ *
+ * Get IPI's working and start taking interrupts.
+ * -- Cort
+ */
+static spinlock_t openpic_setup_lock __initdata = SPIN_LOCK_UNLOCKED;
+
+void __init do_openpic_setup_cpu(void)
+{
+#ifdef CONFIG_IRQ_ALL_CPUS
+ int i;
+ u32 msk = 1 << hard_smp_processor_id();
+#endif
+
+ spin_lock(&openpic_setup_lock);
+
+#ifdef CONFIG_IRQ_ALL_CPUS
+ /* let the openpic know we want intrs. default affinity
+ * is 0xffffffff until changed via /proc
+ * That's how it's done on x86. If we want it differently, then
+ * we should make sure we also change the default values of irq_affinity
+ * in irq.c.
+ */
+ for (i = 0; i < NumSources ; i++)
+ openpic_mapirq(i, openpic_read(&GET_ISU(i).Destination) | msk);
+#endif /* CONFIG_IRQ_ALL_CPUS */
+ openpic_set_priority(0);
+
+ spin_unlock(&openpic_setup_lock);
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Initialize a timer interrupt (and disable it)
+ *
+ * timer: OpenPIC timer number
+ * pri: interrupt source priority
+ * vec: the vector it will produce
+ */
+static void __init openpic_inittimer(u_int timer, u_int pri, u_int vec)
+{
+ check_arg_timer(timer);
+ check_arg_pri(pri);
+ check_arg_vec(vec);
+ openpic_safe_writefield(&OpenPIC->Global.Timer[timer].Vector_Priority,
+ OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK,
+ (pri << OPENPIC_PRIORITY_SHIFT) | vec);
+}
+
+/*
+ * Map a timer interrupt to one or more CPUs
+ *
+ * cpumask is a logical CPU mask; physmask() translates it to the
+ * physical-CPU bits the Destination register expects.
+ */
+static void __init openpic_maptimer(u_int timer, u_int cpumask)
+{
+ check_arg_timer(timer);
+ openpic_write(&OpenPIC->Global.Timer[timer].Destination,
+ physmask(cpumask));
+}
+
+
+/*
+ *
+ * All functions below take an offset'ed irq argument
+ *
+ */
+
+
+/*
+ * Enable/disable an external interrupt source
+ *
+ * Externally called, irq is an offseted system-wide interrupt number
+ */
+static void openpic_enable_irq(u_int irq)
+{
+ unsigned int loops = 100000;
+ check_arg_irq(irq);
+
+ openpic_clearfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority, OPENPIC_MASK);
+ /* make sure mask gets to controller before we return to user */
+ do {
+ if (!loops--) {
+ printk(KERN_ERR "openpic_enable_irq timeout\n");
+ break;
+ }
+
+ mb(); /* sync is probably useless here */
+ } while(openpic_readfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority,
+ OPENPIC_MASK));
+}
+
+/*
+ * Mask an external interrupt source, then poll until the controller
+ * reports it both masked and no longer active (bounded spin, as above).
+ */
+static void openpic_disable_irq(u_int irq)
+{
+ u32 vp;
+ unsigned int loops = 100000;
+
+ check_arg_irq(irq);
+
+ openpic_setfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority, OPENPIC_MASK);
+ /* make sure mask gets to controller before we return to user */
+ do {
+  if (!loops--) {
+   printk(KERN_ERR "openpic_disable_irq timeout\n");
+   break;
+  }
+
+  mb(); /* sync is probably useless here */
+  vp = openpic_readfield(&GET_ISU(irq - open_pic_irq_offset).Vector_Priority,
+      OPENPIC_MASK | OPENPIC_ACTIVITY);
+ } while((vp & OPENPIC_ACTIVITY) && !(vp & OPENPIC_MASK));
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Enable/disable an IPI interrupt source
+ *
+ * Externally called, irq is an offseted system-wide interrupt number
+ */
+void openpic_enable_ipi(u_int irq)
+{
+ /* convert offseted number to an IPI index (0..3) */
+ irq -= openpic_vec_ipi;
+ check_arg_ipi(irq);
+ openpic_clearfield_IPI(&OpenPIC->Global.IPI_Vector_Priority(irq), OPENPIC_MASK);
+
+}
+/* Intentionally a no-op: IPIs must never be masked. */
+void openpic_disable_ipi(u_int irq)
+{
+ /* NEVER disable an IPI... that's just plain wrong! */
+}
+
+#endif
+
+/*
+ * Initialize an interrupt source (and disable it!)
+ *
+ * irq: OpenPIC interrupt number
+ * pri: interrupt source priority
+ * vec: the vector it will produce
+ * pol: polarity (1 for positive, 0 for negative)
+ * sense: 1 for level, 0 for edge
+ */
+static void openpic_initirq(u_int irq, u_int pri, u_int vec, int pol, int sense)
+{
+ /* single masked write programs priority, vector, polarity and sense */
+ openpic_safe_writefield(&GET_ISU(irq).Vector_Priority,
+    OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK |
+    OPENPIC_SENSE_MASK | OPENPIC_POLARITY_MASK,
+    (pri << OPENPIC_PRIORITY_SHIFT) | vec |
+    (pol ? OPENPIC_POLARITY_POSITIVE :
+         OPENPIC_POLARITY_NEGATIVE) |
+    (sense ? OPENPIC_SENSE_LEVEL : OPENPIC_SENSE_EDGE));
+}
+
+/*
+ * Map an interrupt source to one or more CPUs
+ *
+ * NOTE(review): the parameter name shadows the physmask() helper used
+ * elsewhere in this file; callers must pass an already-translated
+ * physical CPU mask.
+ */
+static void openpic_mapirq(u_int irq, u_int physmask)
+{
+ openpic_write(&GET_ISU(irq).Destination, physmask);
+}
+
+/*
+ * Set the sense for an interrupt source (and disable it!)
+ *
+ * sense: 1 for level, 0 for edge
+ *
+ * Only the SENSE bit is rewritten; polarity/priority/vector are
+ * left untouched (mask is OPENPIC_SENSE_LEVEL only).
+ */
+static inline void openpic_set_sense(u_int irq, int sense)
+{
+ openpic_safe_writefield(&GET_ISU(irq).Vector_Priority,
+    OPENPIC_SENSE_LEVEL,
+    (sense ? OPENPIC_SENSE_LEVEL : 0));
+}
+
+/* No spinlocks, should not be necessary with the OpenPIC
+ * (1 register = 1 interrupt and we have the desc lock).
+ */
+/* ack is a no-op: the vector read in openpic_irq() already acked. */
+static void openpic_ack_irq(unsigned int irq_nr)
+{
+}
+
+/* EOI only level-triggered sources at end-of-interrupt. */
+static void openpic_end_irq(unsigned int irq_nr)
+{
+ if ((irq_desc[irq_nr].status & IRQ_LEVEL) != 0)
+  openpic_eoi();
+}
+
+/* /proc/irq affinity hook: strip the offset, translate the logical
+ * cpumask to physical bits and program the Destination register. */
+static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask)
+{
+ openpic_mapirq(irq_nr - open_pic_irq_offset, physmask(cpumask));
+}
+
+#ifdef CONFIG_SMP
+/* IPI ack is a no-op, same reason as openpic_ack_irq above. */
+static void openpic_ack_ipi(unsigned int irq_nr)
+{
+}
+
+static void openpic_end_ipi(unsigned int irq_nr)
+{
+ /* IPIs are marked IRQ_PER_CPU. This has the side effect of
+  * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
+  * applying to them. We EOI them late to avoid re-entering.
+  * however, I'm wondering if we could simply let them have the
+  * SA_INTERRUPT flag and let them execute with all interrupts OFF.
+  * This would have the side effect of either running cross-CPU
+  * functions with interrupts off, or we can re-enable them explicitly
+  * with a __sti() in smp_call_function_interrupt(), since
+  * smp_call_function() is protected by a spinlock.
+  * Or maybe we shouldn't set the IRQ_PER_CPU flag on cross-CPU
+  * function calls IPI at all but that would make a special case.
+  */
+ openpic_eoi();
+}
+
+/* Shared handler for all four IPI vectors: recover the message type
+ * from the vector number and dispatch to smp_message_recv(). */
+static void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+ smp_message_recv(cpl-openpic_vec_ipi, regs);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Fetch the next pending interrupt from the OpenPIC.
+ *
+ * Returns an offseted irq number, or -1 for a spurious interrupt.
+ * If the vector equals the irq offset the source is the cascaded
+ * 8259, which must be interrogated (and the OpenPIC EOI'd) here.
+ */
+int openpic_get_irq(struct pt_regs *regs)
+{
+ extern int i8259_irq(int cpu);
+
+ int irq = openpic_irq();
+
+ /* Management of the cascade should be moved out of here */
+ if (open_pic_irq_offset && irq == open_pic_irq_offset)
+ {
+  /*
+   * This magic address generates a PCI IACK cycle.
+   */
+  if ( chrp_int_ack_special )
+   irq = *chrp_int_ack_special;
+  else
+   irq = i8259_irq( smp_processor_id() );
+  openpic_eoi();
+ }
+ if (irq == openpic_vec_spurious)
+  irq = -1;
+ return irq;
+}
--- /dev/null
+/*
+ * arch/ppc/kernel/open_pic.h -- OpenPIC Interrupt Handling
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#ifndef _PPC64_KERNEL_OPEN_PIC_H
+#define _PPC64_KERNEL_OPEN_PIC_H
+
+#include <linux/config.h>
+
+#define OPENPIC_SIZE 0x40000
+
+/* OpenPIC IRQ controller structure */
+extern struct hw_interrupt_type open_pic;
+
+/* OpenPIC IPI controller structure */
+#ifdef CONFIG_SMP
+extern struct hw_interrupt_type open_pic_ipi;
+#endif /* CONFIG_SMP */
+
+extern u_int OpenPIC_NumInitSenses;
+extern u_char *OpenPIC_InitSenses;
+extern void* OpenPIC_Addr;
+
+/* Exported functions */
+extern void openpic_init(int, int, unsigned char *, int);
+extern void openpic_request_IPIs(void);
+extern void do_openpic_setup_cpu(void);
+extern int openpic_get_irq(struct pt_regs *regs);
+extern void openpic_init_processor(u_int cpumask);
+extern void openpic_setup_ISU(int isu_num, unsigned long addr);
+extern void openpic_cause_IPI(u_int ipi, u_int cpumask);
+
+/* Translate a raw OpenPIC vector into a system-wide irq number by
+ * skipping past the 8259's slot range. */
+extern inline int openpic_to_irq(int irq)
+{
+ return irq + NUM_8259_INTERRUPTS;
+}
+/*extern int open_pic_irq_offset;*/
+#endif /* _PPC64_KERNEL_OPEN_PIC_H */
--- /dev/null
+/*
+ * linux/openpic.h -- OpenPIC definitions
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is based on the following documentation:
+ *
+ * The Open Programmable Interrupt Controller (PIC)
+ * Register Interface Specification Revision 1.2
+ *
+ * Issue Date: October 1995
+ *
+ * Issued jointly by Advanced Micro Devices and Cyrix Corporation
+ *
+ * AMD is a registered trademark of Advanced Micro Devices, Inc.
+ * Copyright (C) 1995, Advanced Micro Devices, Inc. and Cyrix, Inc.
+ * All Rights Reserved.
+ *
+ * To receive a copy of this documentation, send an email to openpic@amd.com.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_OPENPIC_H
+#define _LINUX_OPENPIC_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+/*
+ * OpenPIC supports up to 2048 interrupt sources and up to 32 processors
+ */
+
+#define OPENPIC_MAX_SOURCES 2048
+#define OPENPIC_MAX_PROCESSORS 32
+#define OPENPIC_MAX_ISU 32
+
+#define OPENPIC_NUM_TIMERS 4
+#define OPENPIC_NUM_IPI 4
+#define OPENPIC_NUM_PRI 16
+#define OPENPIC_NUM_VECTORS OPENPIC_MAX_SOURCES
+
+/*
+ * OpenPIC Registers are 32 bits and aligned on 128 bit boundaries
+ */
+
+/* One 32-bit OpenPIC register, padded out to the 128-bit (0x10 byte)
+ * register stride mandated by the spec. */
+typedef struct _OpenPIC_Reg {
+ u_int Reg; /* Little endian! */
+ char Pad[0xc];
+} OpenPIC_Reg;
+
+
+/*
+ * Per Processor Registers
+ */
+
+/* Per-processor register bank; one instance per CPU in the
+ * OpenPIC register map. */
+typedef struct _OpenPIC_Processor {
+ /*
+  * Private Shadow Registers (for SLiC backwards compatibility)
+  */
+ u_int IPI0_Dispatch_Shadow; /* Write Only */
+ char Pad1[0x4];
+ u_int IPI0_Vector_Priority_Shadow; /* Read/Write */
+ char Pad2[0x34];
+ /*
+  * Interprocessor Interrupt Command Ports
+  */
+ OpenPIC_Reg _IPI_Dispatch[OPENPIC_NUM_IPI]; /* Write Only */
+ /*
+  * Current Task Priority Register
+  */
+ OpenPIC_Reg _Current_Task_Priority; /* Read/Write */
+ char Pad3[0x10];
+ /*
+  * Interrupt Acknowledge Register
+  */
+ OpenPIC_Reg _Interrupt_Acknowledge; /* Read Only */
+ /*
+  * End of Interrupt (EOI) Register
+  */
+ OpenPIC_Reg _EOI; /* Read/Write */
+ char Pad5[0xf40];
+} OpenPIC_Processor;
+
+
+ /*
+ * Timer Registers
+ */
+
+/* Register set for one of the OPENPIC_NUM_TIMERS global timers. */
+typedef struct _OpenPIC_Timer {
+ OpenPIC_Reg _Current_Count; /* Read Only */
+ OpenPIC_Reg _Base_Count; /* Read/Write */
+ OpenPIC_Reg _Vector_Priority; /* Read/Write */
+ OpenPIC_Reg _Destination; /* Read/Write */
+} OpenPIC_Timer;
+
+
+ /*
+ * Global Registers
+ */
+
+/* Global (shared, non-per-CPU) OpenPIC registers. */
+typedef struct _OpenPIC_Global {
+ /*
+  * Feature Reporting Registers
+  */
+ OpenPIC_Reg _Feature_Reporting0; /* Read Only */
+ OpenPIC_Reg _Feature_Reporting1; /* Future Expansion */
+ /*
+  * Global Configuration Registers
+  */
+ OpenPIC_Reg _Global_Configuration0; /* Read/Write */
+ OpenPIC_Reg _Global_Configuration1; /* Future Expansion */
+ /*
+  * Vendor Specific Registers
+  */
+ OpenPIC_Reg _Vendor_Specific[4];
+ /*
+  * Vendor Identification Register
+  */
+ OpenPIC_Reg _Vendor_Identification; /* Read Only */
+ /*
+  * Processor Initialization Register
+  */
+ OpenPIC_Reg _Processor_Initialization; /* Read/Write */
+ /*
+  * IPI Vector/Priority Registers
+  */
+ OpenPIC_Reg _IPI_Vector_Priority[OPENPIC_NUM_IPI]; /* Read/Write */
+ /*
+  * Spurious Vector Register
+  */
+ OpenPIC_Reg _Spurious_Vector; /* Read/Write */
+ /*
+  * Global Timer Registers
+  */
+ OpenPIC_Reg _Timer_Frequency; /* Read/Write */
+ OpenPIC_Timer Timer[OPENPIC_NUM_TIMERS];
+ char Pad1[0xee00]; /* pad the global block to its fixed size */
+} OpenPIC_Global;
+
+
+ /*
+ * Interrupt Source Registers
+ */
+
+/* Per-source configuration: vector/priority plus CPU destination. */
+typedef struct _OpenPIC_Source {
+ OpenPIC_Reg _Vector_Priority; /* Read/Write */
+ OpenPIC_Reg _Destination; /* Read/Write */
+} OpenPIC_Source, *OpenPIC_SourcePtr;
+
+
+ /*
+ * OpenPIC Register Map
+ */
+
+/* Full OpenPIC register map as it appears in the MMIO window:
+ * global block, then source config, then per-processor banks. */
+struct OpenPIC {
+ char Pad1[0x1000];
+ /*
+  * Global Registers
+  */
+ OpenPIC_Global Global;
+ /*
+  * Interrupt Source Configuration Registers
+  */
+ OpenPIC_Source Source[OPENPIC_MAX_SOURCES];
+ /*
+  * Per Processor Registers
+  */
+ OpenPIC_Processor Processor[OPENPIC_MAX_PROCESSORS];
+};
+
+extern volatile struct OpenPIC *OpenPIC;
+
+
+/*
+ * Current Task Priority Register
+ */
+
+#define OPENPIC_CURRENT_TASK_PRIORITY_MASK 0x0000000f
+
+/*
+ * Who Am I Register
+ */
+
+#define OPENPIC_WHO_AM_I_ID_MASK 0x0000001f
+
+/*
+ * Feature Reporting Register 0
+ */
+
+#define OPENPIC_FEATURE_LAST_SOURCE_MASK 0x07ff0000
+#define OPENPIC_FEATURE_LAST_SOURCE_SHIFT 16
+#define OPENPIC_FEATURE_LAST_PROCESSOR_MASK 0x00001f00
+#define OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT 8
+#define OPENPIC_FEATURE_VERSION_MASK 0x000000ff
+
+/*
+ * Global Configuration Register 0
+ */
+
+#define OPENPIC_CONFIG_RESET 0x80000000
+#define OPENPIC_CONFIG_8259_PASSTHROUGH_DISABLE 0x20000000
+#define OPENPIC_CONFIG_BASE_MASK 0x000fffff
+
+/*
+ * Vendor Identification Register
+ */
+
+#define OPENPIC_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define OPENPIC_VENDOR_ID_STEPPING_SHIFT 16
+#define OPENPIC_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define OPENPIC_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define OPENPIC_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+
+/*
+ * Vector/Priority Registers
+ */
+
+#define OPENPIC_MASK 0x80000000
+#define OPENPIC_ACTIVITY 0x40000000 /* Read Only */
+#define OPENPIC_PRIORITY_MASK 0x000f0000
+#define OPENPIC_PRIORITY_SHIFT 16
+#define OPENPIC_VECTOR_MASK 0x000007ff
+
+
+/*
+ * Interrupt Source Registers
+ */
+
+#define OPENPIC_POLARITY_POSITIVE 0x00800000
+#define OPENPIC_POLARITY_NEGATIVE 0x00000000
+#define OPENPIC_POLARITY_MASK 0x00800000
+#define OPENPIC_SENSE_LEVEL 0x00400000
+#define OPENPIC_SENSE_EDGE 0x00000000
+#define OPENPIC_SENSE_MASK 0x00400000
+
+
+/*
+ * Timer Registers
+ */
+
+#define OPENPIC_COUNT_MASK 0x7fffffff
+#define OPENPIC_TIMER_TOGGLE 0x80000000
+#define OPENPIC_TIMER_COUNT_INHIBIT 0x80000000
+
+
+/*
+ * Aliases to make life simpler
+ */
+
+/* Per Processor Registers */
+#define IPI_Dispatch(i) _IPI_Dispatch[i].Reg
+#define Current_Task_Priority _Current_Task_Priority.Reg
+#define Interrupt_Acknowledge _Interrupt_Acknowledge.Reg
+#define EOI _EOI.Reg
+
+/* Global Registers */
+#define Feature_Reporting0 _Feature_Reporting0.Reg
+#define Feature_Reporting1 _Feature_Reporting1.Reg
+#define Global_Configuration0 _Global_Configuration0.Reg
+#define Global_Configuration1 _Global_Configuration1.Reg
+#define Vendor_Specific(i) _Vendor_Specific[i].Reg
+#define Vendor_Identification _Vendor_Identification.Reg
+#define Processor_Initialization _Processor_Initialization.Reg
+#define IPI_Vector_Priority(i) _IPI_Vector_Priority[i].Reg
+#define Spurious_Vector _Spurious_Vector.Reg
+#define Timer_Frequency _Timer_Frequency.Reg
+
+/* Timer Registers */
+#define Current_Count _Current_Count.Reg
+#define Base_Count _Base_Count.Reg
+#define Vector_Priority _Vector_Priority.Reg
+#define Destination _Destination.Reg
+
+/* Interrupt Source Registers */
+#define Vector_Priority _Vector_Priority.Reg
+#define Destination _Destination.Reg
+
+/*
+ * Local (static) OpenPIC Operations
+ */
+
+
+/* Global Operations */
+static void openpic_reset(void);
+static void openpic_enable_8259_pass_through(void);
+static void openpic_disable_8259_pass_through(void);
+static u_int openpic_irq(void);
+static void openpic_eoi(void);
+static u_int openpic_get_priority(void);
+static void openpic_set_priority(u_int pri);
+static u_int openpic_get_spurious(void);
+static void openpic_set_spurious(u_int vector);
+
+#ifdef CONFIG_SMP
+/* Interprocessor Interrupts */
+static void openpic_initipi(u_int ipi, u_int pri, u_int vector);
+static void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs);
+#endif
+
+/* Timer Interrupts */
+static void openpic_inittimer(u_int timer, u_int pri, u_int vector);
+static void openpic_maptimer(u_int timer, u_int cpumask);
+
+/* Interrupt Sources */
+static void openpic_enable_irq(u_int irq);
+static void openpic_disable_irq(u_int irq);
+static void openpic_initirq(u_int irq, u_int pri, u_int vector, int polarity,
+ int is_level);
+static void openpic_mapirq(u_int irq, u_int cpumask);
+static void openpic_set_sense(u_int irq, int sense);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_OPENPIC_H */
--- /dev/null
+/*
+ * arch/ppc64/kernel/pSeries_hvCall.S
+ *
+ *
+ * This file contains the generic code to perform a call to the
+ * pSeries LPAR hypervisor.
+ * NOTE: this file will go away when we move to inline this work.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include "ppc_asm.h"
+
+/*
+ * hcall interface to pSeries LPAR
+ */
+#define HSC .long 0x44000022
+
+/* long plpar_hcall(unsigned long opcode, R3
+ unsigned long arg1, R4
+ unsigned long arg2, R5
+ unsigned long arg3, R6
+ unsigned long arg4, R7
+ unsigned long *out1, R8
+ unsigned long *out2, R9
+ unsigned long *out3); R10
+ */
+
+ .text
+/*
+ * long plpar_hcall(opcode, arg1..arg4, *out1, *out2, *out3)
+ *
+ * Saves CR, stashes the three output pointers on the stack, issues the
+ * hypervisor call (HSC), then stores the hypervisor's r4-r6 return
+ * values through the saved pointers.  Status comes back in r3.
+ * NOTE(review): the out-pointer saves at -8/-16/-24(r1) sit below the
+ * new stack pointer after the stdu -- assumes the hypervisor call
+ * preserves that area; confirm against the calling convention.
+ */
+_GLOBAL(plpar_hcall)
+ mfcr r0
+ std r0,-8(r1)
+ stdu r1,-32(r1)
+
+ std r8,-8(r1) /* Save out ptrs. */
+ std r9,-16(r1)
+ std r10,-24(r1)
+
+ HSC /* invoke the hypervisor */
+
+ ld r10,-8(r1) /* Fetch r4-r7 ret args. */
+ std r4,0(r10)
+ ld r10,-16(r1)
+ std r5,0(r10)
+ ld r10,-24(r1)
+ std r6,0(r10)
+
+ ld r1,0(r1) /* pop the frame */
+ ld r0,-8(r1) /* restore CR saved before the stdu */
+ mtcrf 0xff,r0
+ blr /* return r3 = status */
+
+
+/* Simple interface with no output values (other than status) */
+/* NOTE(review): CR is saved at -8(r1) without allocating a frame;
+ * assumes the hypervisor call leaves that below-SP slot intact. */
+_GLOBAL(plpar_hcall_norets)
+ mfcr r0
+ std r0,-8(r1)
+ HSC /* invoke the hypervisor */
+ ld r0,-8(r1)
+ mtcrf 0xff,r0
+ blr /* return r3 = status */
--- /dev/null
+/*
+ * pSeries_lpar.c
+ * Copyright (C) 2001 Todd Inglett, IBM Corporation
+ *
+ * pSeries LPAR support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/abs_addr.h>
+#include <asm/mmu_context.h>
+#include <asm/ppcdebug.h>
+#include <asm/pci_dma.h>
+#include <linux/pci.h>
+#include <asm/Naca.h>
+
+/* Status return values */
+#define H_Success 0
+#define H_Busy 1 /* Hardware busy -- retry later */
+#define H_Hardware -1 /* Hardware error */
+#define H_Function -2 /* Function not supported */
+#define H_Privilege -3 /* Caller not privileged */
+#define H_Parameter -4 /* Parameter invalid, out-of-range or conflicting */
+#define H_Bad_Mode -5 /* Illegal msr value */
+#define H_PTEG_Full -6 /* PTEG is full */
+#define H_Not_Found -7 /* PTE was not found */
+#define H_Reserved_DABR -8 /* DABR address is reserved by the hypervisor on this processor */
+
+/* Flags */
+#define H_LARGE_PAGE (1UL<<(63-16))
+#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */
+#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */
+#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */
+#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
+#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
+#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
+#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
+#define H_COPY_PAGE (1UL<<(63-49))
+#define H_N (1UL<<(63-61))
+#define H_PP1 (1UL<<(63-62))
+#define H_PP2 (1UL<<(63-63))
+
+
+
+/* pSeries hypervisor opcodes */
+#define H_REMOVE 0x04
+#define H_ENTER 0x08
+#define H_READ 0x0c
+#define H_CLEAR_MOD 0x10
+#define H_CLEAR_REF 0x14
+#define H_PROTECT 0x18
+#define H_GET_TCE 0x1c
+#define H_PUT_TCE 0x20
+#define H_SET_SPRG0 0x24
+#define H_SET_DABR 0x28
+#define H_PAGE_INIT 0x2c
+#define H_SET_ASR 0x30
+#define H_ASR_ON 0x34
+#define H_ASR_OFF 0x38
+#define H_LOGICAL_CI_LOAD 0x3c
+#define H_LOGICAL_CI_STORE 0x40
+#define H_LOGICAL_CACHE_LOAD 0x44
+#define H_LOGICAL_CACHE_STORE 0x48
+#define H_LOGICAL_ICBI 0x4c
+#define H_LOGICAL_DCBF 0x50
+#define H_GET_TERM_CHAR 0x54
+#define H_PUT_TERM_CHAR 0x58
+#define H_REAL_TO_LOGICAL 0x5c
+#define H_HYPERVISOR_DATA 0x60
+#define H_EOI 0x64
+#define H_CPPR 0x68
+#define H_IPI 0x6c
+#define H_IPOLL 0x70
+#define H_XIRR 0x74
+
+#define HSC ".long 0x44000022\n"
+#define H_ENTER_r3 "li 3, 0x08\n"
+
+/* plpar_hcall() -- Generic call interface using above opcodes
+ *
+ * The actual call interface is a hypervisor call instruction with
+ * the opcode in R3 and input args in R4-R7.
+ * Status is returned in R3 with variable output values in R4-R11.
+ * Only H_PTE_READ with H_READ_4 uses R6-R11 so we ignore it for now
+ * and return only two out args which MUST ALWAYS BE PROVIDED.
+ */
+long plpar_hcall(unsigned long opcode,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long *out1,
+ unsigned long *out2,
+ unsigned long *out3);
+
+/* Same as plpar_hcall but for those opcodes that return no values
+ * other than status. Slightly more efficient.
+ */
+long plpar_hcall_norets(unsigned long opcode, ...);
+
+
+/*
+ * H_ENTER wrapper: install the HPTE (new_pteh,new_ptel) at index ptex.
+ * The previous HPTE doublewords come back through *old_pteh_ret and
+ * *old_ptel_ret; the hypervisor's third output value is discarded.
+ */
+long plpar_pte_enter(unsigned long flags,
+       unsigned long ptex,
+       unsigned long new_pteh, unsigned long new_ptel,
+       unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ unsigned long unused;
+
+ return plpar_hcall(H_ENTER, flags, ptex, new_pteh, new_ptel,
+      old_pteh_ret, old_ptel_ret, &unused);
+}
+
+/*
+ * H_REMOVE wrapper: invalidate the HPTE at index ptex, optionally
+ * sanity-checked against avpn.  The removed HPTE doublewords are
+ * returned via *old_pteh_ret / *old_ptel_ret.
+ */
+long plpar_pte_remove(unsigned long flags,
+        unsigned long ptex,
+        unsigned long avpn,
+        unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ unsigned long unused;
+ long rc;
+
+ rc = plpar_hcall(H_REMOVE, flags, ptex, avpn, 0,
+    old_pteh_ret, old_ptel_ret, &unused);
+ return rc;
+}
+
+/*
+ * H_READ wrapper: fetch the HPTE at index ptex into
+ * *old_pteh_ret / *old_ptel_ret.
+ */
+long plpar_pte_read(unsigned long flags,
+      unsigned long ptex,
+      unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ unsigned long unused;
+ long rc;
+
+ rc = plpar_hcall(H_READ, flags, ptex, 0, 0,
+    old_pteh_ret, old_ptel_ret, &unused);
+ return rc;
+}
+
+/*
+ * H_PROTECT wrapper: change the page-protection bits of the HPTE at
+ * index ptex.
+ *
+ * Fix: forward avpn to the hypervisor.  H_PROTECT takes the AVPN as
+ * its third argument (checked when the H_AVPN flag is set); previously
+ * the parameter was silently dropped.  All current callers pass 0
+ * without H_AVPN, so behavior there is unchanged.
+ */
+long plpar_pte_protect(unsigned long flags,
+         unsigned long ptex,
+         unsigned long avpn)
+{
+ return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
+}
+
+/*
+ * H_GET_TCE wrapper: read the TCE for (liobn, ioba) into *tce_ret.
+ * Only the first hcall output is meaningful; the rest are discarded.
+ */
+long plpar_tce_get(unsigned long liobn,
+     unsigned long ioba,
+     unsigned long *tce_ret)
+{
+ unsigned long scratch;
+
+ return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0,
+      tce_ret, &scratch, &scratch);
+}
+
+
+/*
+ * H_PUT_TCE wrapper: write tceval as the TCE for (liobn, ioba).
+ * Returns the hypervisor status only.
+ */
+long plpar_tce_put(unsigned long liobn,
+     unsigned long ioba,
+     unsigned long tceval)
+{
+ return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
+}
+
+/*
+ * H_GET_TERM_CHAR wrapper: read up to 16 console bytes for termno.
+ * The byte count lands in *len_ret and the data in buf_ret, which is
+ * treated as two consecutive unsigned longs (ToDo: alignment?).
+ */
+long plpar_get_term_char(unsigned long termno,
+    unsigned long *len_ret,
+    char *buf_ret)
+{
+ unsigned long *words = (unsigned long *)buf_ret; /* ToDo: alignment? */
+
+ return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0,
+      len_ret, &words[0], &words[1]);
+}
+
+/*
+ * H_PUT_TERM_CHAR wrapper: write len console bytes for termno.
+ * The buffer is passed to the hypervisor as two register-sized words
+ * (ToDo: alignment?); all hcall outputs are discarded.
+ */
+long plpar_put_term_char(unsigned long termno,
+    unsigned long len,
+    const char *buffer)
+{
+ unsigned long scratch;
+ unsigned long *words = (unsigned long *)buffer; /* ToDo: alignment? */
+
+ return plpar_hcall(H_PUT_TERM_CHAR, termno, len,
+      words[0], words[1], &scratch, &scratch, &scratch);
+}
+
+/* H_EOI: signal end-of-interrupt for the given XIRR value. */
+long plpar_eoi(unsigned long xirr)
+{
+ return plpar_hcall_norets(H_EOI, xirr);
+}
+
+/* H_CPPR: set the current processor priority register. */
+long plpar_cppr(unsigned long cppr)
+{
+ return plpar_hcall_norets(H_CPPR, cppr);
+}
+
+/* H_IPI: raise an inter-processor interrupt by writing mfrr for
+ * the given interrupt server number. */
+long plpar_ipi(unsigned long servernum,
+        unsigned long mfrr)
+{
+ return plpar_hcall_norets(H_IPI, servernum, mfrr);
+}
+
+/* H_XIRR: fetch the external interrupt request register into
+ * *xirr_ret; the other two hcall outputs are discarded. */
+long plpar_xirr(unsigned long *xirr_ret)
+{
+ unsigned long dummy;
+ return plpar_hcall(H_XIRR, 0, 0, 0, 0,
+      xirr_ret, &dummy, &dummy);
+}
+
+/*
+ * The following section contains code that ultimately should
+ * be put in the relevant file (htab.c, xics.c, etc). It has
+ * been put here for the time being in order to ease maintenance
+ * of the pSeries LPAR code until it can all be put into CVS.
+ */
+/* Invalidate the HPTE at the given slot via H_REMOVE; the removed
+ * contents are fetched into old_pte but not used.  Any hypervisor
+ * failure is fatal (BUG). */
+static void hpte_invalidate_pSeriesLP(unsigned long slot)
+{
+ HPTE old_pte;
+ unsigned long lpar_rc;
+ unsigned long flags = 0; /* no AVPN check */
+
+ lpar_rc = plpar_pte_remove(flags,
+       slot,
+       0,
+       &old_pte.dw0.dword0,
+       &old_pte.dw1.dword1);
+ if (lpar_rc != H_Success) BUG();
+}
+
+/* NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
+ * the low 3 bits of flags happen to line up. So no transform is needed.
+ * We can probably optimize here and assume the high bits of newpp are
+ * already zero. For now I am paranoid.
+ */
+/* Update the pp (protection) bits of the HPTE at slot via H_PROTECT.
+ * NOTE(review): the va parameter is unused and no AVPN is supplied
+ * to the hypervisor -- confirm whether AVPN checking was intended. */
+static void hpte_updatepp_pSeriesLP(long slot, unsigned long newpp, unsigned long va)
+{
+ unsigned long lpar_rc;
+ unsigned long flags;
+ flags = newpp & 3;
+ lpar_rc = plpar_pte_protect( flags,
+         slot,
+         0);
+ if (lpar_rc != H_Success) {
+  udbg_printf( " bad return code from pte protect rc = %lx \n", lpar_rc);
+  for (;;); /* hang: failure here is unrecoverable */
+ }
+}
+
+/* Change protection on a bolted kernel mapping: compute the VPN for
+ * the kernel EA, locate its HPTE slot, then H_PROTECT it. */
+static void hpte_updateboltedpp_pSeriesLP(unsigned long newpp, unsigned long ea)
+{
+ unsigned long lpar_rc;
+ unsigned long vsid,va,vpn,flags;
+ long slot;
+
+ vsid = get_kernel_vsid( ea );
+ va = ( vsid << 28 ) | ( ea & 0x0fffffff );
+ vpn = va >> PAGE_SHIFT;
+
+ slot = ppc_md.hpte_find( vpn );
+ flags = newpp & 3; /* linux pp bits line up with low flags bits */
+ lpar_rc = plpar_pte_protect( flags,
+         slot,
+         0);
+ if (lpar_rc != H_Success) {
+  udbg_printf( " bad return code from pte bolted protect rc = %lx \n", lpar_rc);
+  for (;;); /* hang: failure here is unrecoverable */
+ }
+}
+
+
+/* Read the first doubleword of the HPTE at the given slot via H_READ;
+ * the second doubleword is fetched but discarded. */
+static unsigned long hpte_getword0_pSeriesLP(unsigned long slot)
+{
+ unsigned long dword0;
+ unsigned long lpar_rc;
+ unsigned long dummy_word1;
+ unsigned long flags;
+ /* Read 1 pte at a time */
+ /* Do not need RPN to logical page translation */
+ /* No cross CEC PFT access */
+ flags = 0;
+
+ lpar_rc = plpar_pte_read(flags,
+     slot,
+     &dword0, &dummy_word1);
+ if (lpar_rc != H_Success) {
+  udbg_printf(" error on pte read in get_hpte0 rc = %lx \n", lpar_rc);
+  for (;;); /* hang: failure here is unrecoverable */
+ }
+
+ return(dword0);
+}
+
+/*
+ * Pick an HPTE slot for the given vpn.
+ *
+ * 1. Scan the primary PTEG for an invalid (v == 0) entry.
+ * 2. Failing that, scan the secondary PTEG (~primary_hash).
+ * 3. Failing that, evict a non-bolted entry from the primary PTEG,
+ *    starting at a round-robin offset, and return its slot.
+ * Hangs (for(;;)) if every primary entry is bolted or any H_READ fails.
+ */
+static long hpte_selectslot_pSeriesLP(unsigned long vpn)
+{
+ unsigned long primary_hash;
+ unsigned long hpteg_slot;
+ unsigned i, k;
+ unsigned long flags;
+ HPTE pte_read;
+ unsigned long lpar_rc;
+
+ /* Search the primary group for an available slot */
+ primary_hash = hpt_hash(vpn, 0);
+
+ hpteg_slot = ( primary_hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+
+ /* Read 1 pte at a time */
+ /* Do not need RPN to logical page translation */
+ /* No cross CEC PFT access */
+ flags = 0;
+ for (i=0; i<HPTES_PER_GROUP; ++i) {
+  /* read the hpte entry from the slot */
+  lpar_rc = plpar_pte_read(flags,
+      hpteg_slot + i,
+      &pte_read.dw0.dword0, &pte_read.dw1.dword1);
+  if (lpar_rc != H_Success) {
+   udbg_printf(" read of hardware page table failed rc = %lx \n", lpar_rc);
+   for (;;);
+  }
+  if ( pte_read.dw0.dw0.v == 0 ) {
+   /* If an available slot found, return it */
+   return hpteg_slot + i;
+  }
+
+ }
+
+
+ /* Search the secondary group for an available slot */
+ hpteg_slot = ( ~primary_hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+
+
+ for (i=0; i<HPTES_PER_GROUP; ++i) {
+  /* read the hpte entry from the slot */
+  lpar_rc = plpar_pte_read(flags,
+      hpteg_slot + i,
+      &pte_read.dw0.dword0, &pte_read.dw1.dword1);
+  if (lpar_rc != H_Success) {
+   udbg_printf(" read of hardware page table failed2 rc = %lx \n", lpar_rc);
+   for (;;);
+  }
+  if ( pte_read.dw0.dw0.v == 0 ) {
+   /* If an available slot found, return it */
+   return hpteg_slot + i;
+  }
+
+ }
+
+ /* No available entry found in secondary group */
+
+
+ /* Select an entry in the primary group to replace */
+
+ hpteg_slot = ( primary_hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
+
+ /* round-robin start offset within the group.
+  * NOTE(review): the & 0x7 assumes HPTES_PER_GROUP == 8 -- confirm. */
+ k = htab_data.next_round_robin++ & 0x7;
+
+ for (i=0; i<HPTES_PER_GROUP; ++i) {
+  if (k == HPTES_PER_GROUP)
+   k = 0; /* wrap around within the group */
+
+  lpar_rc = plpar_pte_read(flags,
+      hpteg_slot + k,
+      &pte_read.dw0.dword0, &pte_read.dw1.dword1);
+  if (lpar_rc != H_Success) {
+   udbg_printf( " pte read failed - rc = %lx", lpar_rc);
+   for (;;);
+  }
+  if ( ! pte_read.dw0.dw0.bolted)
+  {
+   hpteg_slot += k;
+   /* Invalidate the current entry */
+   ppc_md.hpte_invalidate(hpteg_slot);
+   return hpteg_slot;
+  }
+  ++k;
+ }
+
+ /* No non-bolted entry found in primary group - time to panic */
+ udbg_printf("select_hpte_slot - No non-bolted HPTE in group 0x%lx! \n", hpteg_slot/HPTES_PER_GROUP);
+ udbg_printf("No non-bolted HPTE in group %lx", (unsigned long)hpteg_slot/HPTES_PER_GROUP);
+ for (;;);
+
+ /* never executes - avoid compiler errors */
+ return 0;
+}
+
+
+/*
+ * Build a valid HPTE from (vpn, prpn, hash, hpteflags, bolted) and
+ * install it at exactly the given slot via H_ENTER with H_EXACT set.
+ * Coherence is dropped for guarded/no-cache pages.  Failure hangs.
+ * NOTE(review): the "#if 1" inline-asm path is the one compiled in;
+ * the plpar_pte_enter() path below it is dead code and ret_hpte is
+ * unused on the live path.
+ */
+static void hpte_create_valid_pSeriesLP(unsigned long slot, unsigned long vpn,
+     unsigned long prpn, unsigned hash,
+     void *ptep, unsigned hpteflags,
+     unsigned bolted)
+{
+ /* Local copy of HPTE */
+ struct {
+  /* Local copy of first doubleword of HPTE */
+  union {
+   unsigned long d;
+   Hpte_dword0 h;
+  } dw0;
+  /* Local copy of second doubleword of HPTE */
+  union {
+   unsigned long d;
+   Hpte_dword1 h;
+   Hpte_dword1_flags f;
+  } dw1;
+ } lhpte;
+
+ unsigned long avpn = vpn >> 11;
+ unsigned long arpn = physRpn_to_absRpn( prpn );
+
+ unsigned long lpar_rc;
+ unsigned long flags;
+ HPTE ret_hpte;
+
+ /* Fill in the local HPTE with absolute rpn, avpn and flags */
+ lhpte.dw1.d = 0;
+ lhpte.dw1.h.rpn = arpn;
+ lhpte.dw1.f.flags = hpteflags;
+
+ lhpte.dw0.d = 0;
+ lhpte.dw0.h.avpn = avpn;
+ lhpte.dw0.h.h = hash;
+ lhpte.dw0.h.bolted = bolted;
+ lhpte.dw0.h.v = 1;
+
+ /* Now fill in the actual HPTE */
+ /* Set CEC cookie to 0 */
+ /* Large page = 0 */
+ /* Zero page = 0 */
+ /* I-cache Invalidate = 0 */
+ /* I-cache synchronize = 0 */
+ /* Exact = 1 - only modify exact entry */
+ flags = H_EXACT;
+
+ if (hpteflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
+  lhpte.dw1.f.flags &= ~_PAGE_COHERENT;
+#if 1
+ /* hand-rolled H_ENTER: opcode in r3, args in r4-r7, status in r3 */
+ __asm__ __volatile__ (
+  H_ENTER_r3
+  "mr 4, %1\n"
+                "mr 5, %2\n"
+                "mr 6, %3\n"
+                "mr 7, %4\n"
+  HSC
+                "mr %0, 3\n"
+                : "=r" (lpar_rc)
+                : "r" (flags), "r" (slot), "r" (lhpte.dw0.d), "r" (lhpte.dw1.d)
+                : "r3", "r4", "r5", "r6", "r7", "cc");
+#else
+ lpar_rc = plpar_pte_enter(flags,
+      slot,
+      lhpte.dw0.d,
+      lhpte.dw1.d,
+      &ret_hpte.dw0.dword0,
+      &ret_hpte.dw1.dword1);
+#endif
+ if (lpar_rc != H_Success) {
+  udbg_printf("error on pte enter lapar rc = %ld\n",lpar_rc);
+  udbg_printf("ent: s=%lx, dw0=%lx, dw1=%lx\n", slot, lhpte.dw0.d, lhpte.dw1.d);
+  /* xmon_backtrace("backtrace"); */
+  for (;;);
+ }
+}
+
+/*
+ * Find the slot of the valid HPTE matching vpn.
+ *
+ * Scans the primary PTEG (j == 0) then the secondary PTEG (j == 1,
+ * hash inverted), comparing AVPN, valid bit, and hash flavor.
+ * Returns the slot (negated for a secondary-group hit), or -1 if
+ * not found.
+ */
+static long hpte_find_pSeriesLP(unsigned long vpn)
+{
+ union {
+  unsigned long d;
+  Hpte_dword0 h;
+ } hpte_dw0;
+ long slot;
+ unsigned long hash;
+ unsigned long i,j;
+
+ hash = hpt_hash(vpn, 0);
+ for ( j=0; j<2; ++j ) {
+  slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
+  for ( i=0; i<HPTES_PER_GROUP; ++i ) {
+   hpte_dw0.d = hpte_getword0_pSeriesLP( slot );
+   if ( ( hpte_dw0.h.avpn == ( vpn >> 11 ) ) &&
+        ( hpte_dw0.h.v ) &&
+        ( hpte_dw0.h.h == j ) ) {
+    /* HPTE matches */
+    if ( j )
+     slot = -slot; /* flag secondary-group hit */
+    return slot;
+   }
+   ++slot;
+  }
+  hash = ~hash; /* switch to the secondary group */
+ }
+ return -1;
+}
+
+/*
+ * Create a pte - LPAR . Used during initialization only.
+ * We assume the PTE will fit in the primary PTEG.
+ *
+ * Builds a bolted HPTE for (va -> pa | mode) and hands it to the
+ * hypervisor via H_ENTER with flags 0 (any free slot in the group).
+ * NOTE(review): as with hpte_create_valid_pSeriesLP, only the "#if 1"
+ * inline-asm path is compiled; the plpar_pte_enter() path is dead and
+ * the return status is deliberately not checked (see #if 0 below).
+ */
+void make_pte_LPAR(HPTE *htab,
+ unsigned long va, unsigned long pa, int mode,
+ unsigned long hash_mask, int large)
+{
+ HPTE local_hpte, ret_hpte;
+ unsigned long hash, slot, flags,lpar_rc, vpn;
+
+ if (large)
+  vpn = va >> 24;
+ else
+  vpn = va >> 12;
+
+ hash = hpt_hash(vpn, large);
+
+ slot = ((hash & hash_mask)*HPTES_PER_GROUP);
+
+ local_hpte.dw1.dword1 = pa | mode;
+ local_hpte.dw0.dword0 = 0;
+ local_hpte.dw0.dw0.avpn = va >> 23;
+ local_hpte.dw0.dw0.bolted = 1; /* bolted */
+ local_hpte.dw0.dw0.v = 1;
+
+ /* Set CEC cookie to 0 */
+ /* Large page = 0 */
+ /* Zero page = 0 */
+ /* I-cache Invalidate = 0 */
+ /* I-cache synchronize = 0 */
+ /* Exact = 0 - modify any entry in group */
+ flags = 0;
+#if 1
+ /* hand-rolled H_ENTER: opcode in r3, args in r4-r7, status in r3 */
+ __asm__ __volatile__ (
+  H_ENTER_r3
+  "mr 4, %1\n"
+                "mr 5, %2\n"
+                "mr 6, %3\n"
+                "mr 7, %4\n"
+  HSC
+                "mr %0, 3\n"
+                : "=r" (lpar_rc)
+                : "r" (flags), "r" (slot), "r" (local_hpte.dw0.dword0), "r" (local_hpte.dw1.dword1)
+                : "r3", "r4", "r5", "r6", "r7", "cc");
+#else
+ lpar_rc = plpar_pte_enter(flags,
+      slot,
+      local_hpte.dw0.dword0,
+      local_hpte.dw1.dword1,
+      &ret_hpte.dw0.dword0,
+      &ret_hpte.dw1.dword1);
+#endif
+#if 0 /* NOTE: we explicitly do not check return status here because it is
+       * "normal" for early boot code to map io regions for which a partition
+       * has no access. However, we will die if we actually fault on these
+       * "permission denied" pages.
+       */
+ if (lpar_rc != H_Success) {
+  /* pSeriesLP_init_early(); */
+  udbg_printf("flags=%lx, slot=%lx, dword0=%lx, dword1=%lx, rc=%d\n", flags, slot, local_hpte.dw0.dword0,local_hpte.dw1.dword1, lpar_rc);
+  BUG();
+ }
+#endif
+}
+
+/*
+ * Program one TCE (I/O translation entry) via H_PUT_TCE, mapping
+ * tcenum in table tbl to the absolute page behind uaddr.  The entry
+ * is writable toward memory unless direction is PCI_DMA_TODEVICE.
+ * Failures are only logged (PPCDBG), not propagated.
+ */
+static void tce_build_pSeriesLP(struct TceTable *tbl, long tcenum,
+    unsigned long uaddr, int direction )
+{
+ u64 setTceRc;
+ union Tce tce;
+
+ PPCDBG(PPCDBG_TCE, "build_tce: uaddr = 0x%lx\n", uaddr);
+ PPCDBG(PPCDBG_TCE, "\ttcenum = 0x%lx, tbl = 0x%lx, index=%lx\n",
+        tcenum, tbl, tbl->index);
+
+ tce.wholeTce = 0;
+ tce.tceBits.rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
+
+ tce.tceBits.readWrite = 1;
+ if ( direction != PCI_DMA_TODEVICE ) tce.tceBits.pciWrite = 1;
+
+ setTceRc = plpar_tce_put((u64)tbl->index,
+     (u64)tcenum << 12,
+     tce.wholeTce );
+ /* Make sure the update is visible to hardware.
+  * ToDo: sync after setting *all* the tce's.
+  */
+ __asm__ __volatile__ ("sync" : : : "memory");
+
+ if(setTceRc) {
+  PPCDBG(PPCDBG_TCE, "setTce failed. rc=%ld\n", setTceRc);
+  PPCDBG(PPCDBG_TCE, "\tindex   = 0x%lx\n", (u64)tbl->index);
+  PPCDBG(PPCDBG_TCE, "\ttcenum  = 0x%lx\n", (u64)tcenum);
+  PPCDBG(PPCDBG_TCE, "\ttce val = 0x%lx\n", tce.wholeTce );
+ }
+}
+
+/* Release a 2^order run of TCE allocation-bitmap entries starting at
+ * tcenum.  Thin locking wrapper: takes tbl->lock (irq-safe) around
+ * free_tce_range_nolock(), which does the actual bitmap work.
+ */
+static inline void free_tce_range(struct TceTable *tbl,
+ long tcenum, unsigned order )
+{
+ unsigned long flags;
+
+ /* Lock the tce allocation bitmap */
+ spin_lock_irqsave( &(tbl->lock), flags );
+
+ /* Do the actual work */
+ free_tce_range_nolock( tbl, tcenum, order );
+
+ /* Unlock the tce allocation bitmap */
+ spin_unlock_irqrestore( &(tbl->lock), flags );
+
+}
+
+/* Invalidate numPages TCEs starting at the entry addressed by dma_addr
+ * (one H_PUT_TCE hcall per page, writing a zero TCE), then sync and
+ * return the range to the allocation bitmap via free_tce_range().
+ *  dma_addr: bus address whose page index selects the first TCE
+ *  order:    log2 size of the bitmap range to free
+ *  numPages: number of TCE entries to clear
+ */
+static void tce_free_pSeriesLP(struct TceTable *tbl, dma_addr_t dma_addr,
+ unsigned order, unsigned numPages)
+{
+ u64 setTceRc;
+ long tcenum, freeTce, maxTcenum;
+ unsigned i;
+ union Tce tce;
+
+ maxTcenum = (tbl->size * (PAGE_SIZE / sizeof(union Tce))) - 1;
+
+ tcenum = dma_addr >> PAGE_SHIFT;
+
+ /* Bitmap index is relative to the table's start offset. */
+ freeTce = tcenum - tbl->startOffset;
+
+ /* NOTE(review): the message below says "tcenum > maxTcenum" but the
+  * check is on freeTce; also a dma_addr below startOffset would make
+  * freeTce negative and pass this check -- confirm that cannot happen.
+  */
+ if ( freeTce > maxTcenum ) {
+ printk("free_tces: tcenum > maxTcenum\n");
+ printk("\ttcenum = 0x%lx\n", tcenum);
+ printk("\tfreeTce = 0x%lx\n", freeTce);
+ printk("\tmaxTcenum = 0x%lx\n", maxTcenum);
+ printk("\tTCE Table = 0x%lx\n", (u64)tbl);
+ printk("\tbus# = 0x%lx\n",
+ (u64)tbl->busNumber );
+ printk("\tsize = 0x%lx\n", (u64)tbl->size);
+ printk("\tstartOff = 0x%lx\n",
+ (u64)tbl->startOffset );
+ printk("\tindex = 0x%lx\n", (u64)tbl->index);
+ return;
+ }
+
+ for (i=0; i<numPages; ++i) {
+ tce.wholeTce = 0;
+ setTceRc = plpar_tce_put((u64)tbl->index,
+ (u64)tcenum << 12, /* note: not freeTce */
+ tce.wholeTce );
+ if ( setTceRc ) {
+ printk("tce_free: setTce failed\n");
+ printk("\trc = %ld\n", setTceRc);
+ printk("\tindex = 0x%lx\n",
+ (u64)tbl->index);
+ printk("\ttcenum = 0x%lx\n", (u64)tcenum);
+ printk("\tfreeTce = 0x%lx\n", (u64)freeTce);
+ printk("\ttce val = 0x%lx\n",
+ tce.wholeTce );
+ }
+
+ ++tcenum;
+ }
+
+ /* Make sure the update is visible to hardware. */
+ __asm__ __volatile__ ("sync" : : : "memory");
+
+ free_tce_range( tbl, freeTce, order );
+}
+
+/* PowerPC Interrupts for lpar. */
+/* NOTE: this typedef is duplicated (for now) from xics.c! */
+/* Per-cpu XICS interrupt-controller accessor table; the pSeriesLP_*
+ * implementations below route each operation through an hcall.
+ */
+typedef struct {
+ int (*xirr_info_get)(int cpu);
+ void (*xirr_info_set)(int cpu, int val);
+ void (*cppr_info)(int cpu, u8 val);
+ void (*qirr_info)(int cpu, u8 val);
+} xics_ops;
+/* Fetch the XIRR (external interrupt request) value via the H_XIRR
+ * hcall.  n_cpu is unused -- the hcall acts on the calling cpu.
+ * Panics if the hypervisor reports failure.
+ */
+static int pSeriesLP_xirr_info_get(int n_cpu)
+{
+ unsigned long lpar_rc;
+ unsigned long return_value;
+
+ lpar_rc = plpar_xirr(&return_value);
+ if (lpar_rc != H_Success) {
+ panic(" bad return code xirr - rc = %lx \n", lpar_rc);
+ }
+ return ((int)(return_value));
+}
+
+/* Signal end-of-interrupt for the given XIRR value via the H_EOI
+ * hcall (low 32 bits only).  Panics on hcall failure.
+ */
+static void pSeriesLP_xirr_info_set(int n_cpu, int value)
+{
+ unsigned long lpar_rc;
+ unsigned long val64 = value & 0xffffffff;
+
+ lpar_rc = plpar_eoi(val64);
+ if (lpar_rc != H_Success) {
+ panic(" bad return code EOI - rc = %ld, value=%lx \n", lpar_rc, val64);
+ }
+}
+
+/* Set the current processor priority register (CPPR) via the H_CPPR
+ * hcall.  Panics on hcall failure.
+ */
+static void pSeriesLP_cppr_info(int n_cpu, u8 value)
+{
+ unsigned long lpar_rc;
+
+ lpar_rc = plpar_cppr(value);
+ if (lpar_rc != H_Success) {
+ panic(" bad return code cppr - rc = %lx \n", lpar_rc);
+ }
+}
+
+/* Write the interrupt queue register (IPI trigger) of cpu n_cpu via
+ * the H_IPI hcall, translating the logical cpu to its hard id.
+ * Panics on hcall failure.
+ */
+static void pSeriesLP_qirr_info(int n_cpu , u8 value)
+{
+ unsigned long lpar_rc;
+
+ lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu),value);
+ if (lpar_rc != H_Success) {
+ panic(" bad return code qirr -ipi - rc = %lx \n", lpar_rc);
+ }
+}
+
+/* LPAR flavour of the XICS accessor table (positional initializers
+ * follow the xics_ops field order above).
+ */
+xics_ops pSeriesLP_ops = {
+ pSeriesLP_xirr_info_get,
+ pSeriesLP_xirr_info_set,
+ pSeriesLP_cppr_info,
+ pSeriesLP_qirr_info
+};
+/* end TAI-LPAR */
+
+
+int vtermno; /* virtual terminal# for udbg */
+
+/* Write one character to the hypervisor virtual console (terminal
+ * vtermno) via H_PUT_TERM_CHAR.  '\n' is expanded to CRLF, and the
+ * call is retried while the hypervisor reports H_Busy.
+ */
+static void udbg_putcLP(unsigned char c)
+{
+ char buf[16];
+ unsigned long rc;
+
+ if (c == '\n')
+ udbg_putcLP('\r');
+
+ buf[0] = c;
+ do {
+ rc = plpar_put_term_char(vtermno, 1, buf);
+ } while(rc == H_Busy);
+}
+
+/* Buffered chars getc */
+static long inbuflen;
+static long inbuf[2]; /* must be 2 longs */
+
+/* Non-blocking console read for the LPAR virtual terminal.  Returns
+ * the next buffered character, or -1 if the hypervisor has nothing
+ * pending.  H_GET_TERM_CHAR may return up to 16 chars at once; the
+ * surplus is kept in inbuf[] for later calls.
+ */
+static int udbg_getc_pollLP(void)
+{
+ /* The interface is tricky because it may return up to 16 chars.
+ * We save them statically for future calls to udbg_getc().
+ */
+ char ch, *buf = (char *)inbuf;
+ int i;
+ long rc;
+ if (inbuflen == 0) {
+ /* get some more chars. */
+ inbuflen = 0;
+ rc = plpar_get_term_char(vtermno, &inbuflen, buf);
+ /* NOTE(review): if the hcall fails (rc != H_Success) while
+  * inbuflen stays 0, we fall through, return stale buf[0] and
+  * decrement inbuflen to -1 -- looks like a latent bug; confirm.
+  */
+ if (inbuflen == 0 && rc == H_Success)
+ return -1;
+ }
+ ch = buf[0];
+ for (i = 1; i < inbuflen; i++) /* shuffle them down. */
+ buf[i-1] = buf[i];
+ inbuflen--;
+ return ch;
+}
+
+/* Blocking console read: spin on udbg_getc_pollLP(), inserting a
+ * crude busy-wait delay between polls, until a character arrives.
+ */
+static unsigned char udbg_getcLP(void)
+{
+ int ch;
+ for (;;) {
+ ch = udbg_getc_pollLP();
+ if (ch == -1) {
+ /* This shouldn't be needed...but... */
+ volatile unsigned long delay;
+ for (delay=0; delay < 2000000; delay++)
+ ;
+ } else {
+ return ch;
+ }
+ }
+}
+
+
+/* This is called early in setup.c.
+ * Use it to setup page table ppc_md stuff as well as udbg.
+ * Installs the LPAR (hcall-based) hash-table, TCE and udbg console
+ * handlers into ppc_md, does early SMP/PCI init, and picks either the
+ * physical serial port or the hypervisor virtual terminal for udbg.
+ */
+void pSeriesLP_init_early(void)
+{
+ /* Hash page-table primitives go through hypervisor calls on LPAR. */
+ ppc_md.hpte_invalidate = hpte_invalidate_pSeriesLP;
+ ppc_md.hpte_updatepp = hpte_updatepp_pSeriesLP;
+ ppc_md.hpte_updateboltedpp = hpte_updateboltedpp_pSeriesLP;
+ ppc_md.hpte_getword0 = hpte_getword0_pSeriesLP;
+ ppc_md.hpte_selectslot = hpte_selectslot_pSeriesLP;
+ ppc_md.hpte_create_valid = hpte_create_valid_pSeriesLP;
+ ppc_md.hpte_find = hpte_find_pSeriesLP;
+
+ ppc_md.tce_build = tce_build_pSeriesLP;
+ ppc_md.tce_free = tce_free_pSeriesLP;
+
+#ifdef CONFIG_SMP
+ smp_init_pSeries();
+#endif
+ pSeries_pcibios_init_early();
+
+ /* The keyboard is not useful in the LPAR environment.
+ * Leave all the interfaces NULL.
+ */
+
+ if (naca->serialPortAddr) {
+ /* A real serial port exists: map it and use the UART udbg hooks. */
+ void *comport = (void *)__ioremap(naca->serialPortAddr, 16, _PAGE_NO_CACHE);
+ udbg_init_uart(comport);
+ ppc_md.udbg_putc = udbg_putc;
+ ppc_md.udbg_getc = udbg_getc;
+ ppc_md.udbg_getc_poll = udbg_getc_poll;
+ } else {
+ /* lookup the first virtual terminal number in case we don't have a com port.
+ * Zero is probably correct in case someone calls udbg before the init.
+ * The property is a pair of numbers. The first is the starting termno (the
+ * one we use) and the second is the number of terminals.
+ */
+ u32 *termno;
+ struct device_node *np = find_path_device("/rtas");
+ if (np) {
+ termno = (u32 *)get_property(np, "ibm,termno", 0);
+ if (termno)
+ vtermno = termno[0];
+ }
+ ppc_md.udbg_putc = udbg_putcLP;
+ ppc_md.udbg_getc = udbg_getcLP;
+ ppc_md.udbg_getc_poll = udbg_getc_pollLP;
+ }
+}
+
+/* Read pending characters from hypervisor virtual terminal `index`
+ * into buf via H_GET_TERM_CHAR.  Returns the number of characters
+ * read (possibly 0), or 0 on hcall failure.
+ * NOTE(review): `count` is ignored and the hcall stores its result
+ * as two longs directly into buf -- caller must supply a buffer of
+ * at least 16 bytes; confirm against callers.
+ */
+int hvc_get_chars(int index, char *buf, int count)
+{
+ unsigned long got;
+
+ if (plpar_hcall(H_GET_TERM_CHAR, index, 0, 0, 0, &got,
+ (unsigned long *)buf, (unsigned long *)buf+1) == H_Success) {
+ /*
+ * Work around a HV bug where it gives us a null
+ * after every \r. -- paulus
+ */
+ if (got > 0) {
+ int i;
+ for (i = 1; i < got; ++i) {
+ if (buf[i] == 0 && buf[i-1] == '\r') {
+ /* Drop the spurious NUL and close the gap. */
+ --got;
+ if (i < got)
+ memmove(&buf[i], &buf[i+1],
+ got - i);
+ }
+ }
+ }
+ return got;
+ }
+ return 0;
+}
+
+/* Write `count` characters to hypervisor virtual terminal `index`
+ * via H_PUT_TERM_CHAR.  Returns count on success, 0 if the hypervisor
+ * is busy (caller should retry), -1 on any other error.
+ * NOTE(review): the hcall always takes lbuf[0] and lbuf[1], i.e. 16
+ * bytes from buf regardless of count -- confirm callers guarantee a
+ * 16-byte buffer and count <= 16.
+ */
+int hvc_put_chars(int index, const char *buf, int count)
+{
+ unsigned long dummy;
+ unsigned long *lbuf = (unsigned long *) buf;
+ long ret;
+
+ ret = plpar_hcall(H_PUT_TERM_CHAR, index, count, lbuf[0], lbuf[1],
+ &dummy, &dummy, &dummy);
+ if (ret == H_Success)
+ return count;
+ if (ret == H_Busy)
+ return 0;
+ return -1;
+}
+
+/* Return the number of hypervisor virtual terminals, from the second
+ * cell of the /rtas "ibm,termno" property; optionally store the first
+ * terminal number through start_termno.  Returns 0 if the node or
+ * property is absent.
+ */
+int hvc_count(int *start_termno)
+{
+ u32 *termno;
+ struct device_node *dn;
+
+ if ((dn = find_path_device("/rtas")) != NULL) {
+ if ((termno = (u32 *)get_property(dn, "ibm,termno", 0)) != NULL) {
+ if (start_termno)
+ *start_termno = termno[0];
+ return termno[1];
+ }
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * pSeries_pci.c
+ *
+ * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
+ *
+ * pSeries specific routines for PCI.
+ *
+ * Based on code from pci.c and chrp_pci.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/init.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppcdebug.h>
+#include <asm/Naca.h>
+#include <asm/pci_dma.h>
+#ifdef CONFIG_PPC_EEH
+#include <asm/eeh.h>
+#endif
+
+#include "xics.h"
+#include "open_pic.h"
+#include "pci.h"
+
+extern struct device_node *allnodes;
+
+/*******************************************************************
+ * Forward declares of prototypes.
+ *******************************************************************/
+unsigned long find_and_init_phbs(void);
+struct pci_controller* alloc_phb(struct device_node *dev, char *model, unsigned int addr_size_words) ;
+void pSeries_pcibios_fixup(void);
+static int rtas_fake_read(struct device_node *dn, int offset, int nbytes, unsigned long *returnval);
+
+/* RTAS tokens */
+/* Resolved from the device tree in find_and_init_phbs() via rtas_token(). */
+static int read_pci_config;
+static int write_pci_config;
+static int ibm_read_pci_config;
+static int ibm_write_pci_config;
+
+/* Set by check_s7a() when running on an IBM 7013-S7A; used to adjust
+ * interrupt lines in pci_read_irq_line().
+ */
+static int s7a_workaround;
+
+/******************************************************************************
+ *
+ * pSeries I/O Operations to access the PCI configuration space.
+ *
+ *****************************************************************************/
+/* RTAS_PCI_READ_OP(size, type, nbytes) expands to two functions:
+ *   rtas_read_config_<size>(dn, offset, val) - config read keyed by
+ *     device_node.  Uses the ibm,read-pci-config token (passing the
+ *     PHB's 64-bit BUID split into two words) when the PHB has a buid,
+ *     falling back to rtas_fake_read() when that call errors; otherwise
+ *     uses the plain read-pci-config token.  ret: 0 ok, <0 error
+ *     (-1 device status bad, -2 no device node).
+ *   rtas_pci_read_config_<size>(dev, offset, val) - pci_ops wrapper
+ *     that maps dev to its OF node and the rc to PCIBIOS_* codes.
+ */
+#define RTAS_PCI_READ_OP(size, type, nbytes) \
+int __chrp \
+rtas_read_config_##size(struct device_node *dn, int offset, type val) { \
+ unsigned long returnval = ~0L; \
+ unsigned long buid; \
+ unsigned int addr; \
+ int ret; \
+ \
+ if (dn == NULL) { \
+ ret = -2; \
+ } else if (dn->status) { \
+ ret = -1; \
+ } else { \
+ addr = (dn->busno << 16) | (dn->devfn << 8) | offset; \
+ buid = dn->phb->buid; \
+ if (buid) { \
+ ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval, addr, buid >> 32, buid & 0xffffffff, nbytes); \
+ if (ret < 0) \
+ ret = rtas_fake_read(dn, offset, nbytes, &returnval); \
+ } else { \
+ ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, nbytes); \
+ } \
+ } \
+ *val = returnval; \
+ return ret; \
+} \
+int __chrp \
+rtas_pci_read_config_##size(struct pci_dev *dev, int offset, type val) { \
+ struct device_node *dn = pci_device_to_OF_node(dev); \
+ int ret = rtas_read_config_##size(dn, offset, val); \
+ /* udbg_printf("read bus=%x, devfn=%x, ret=%d phb=%lx, dn=%lx\n", dev->bus->number, dev->devfn, ret, dn ? dn->phb : 0, dn); */ \
+ return ret ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; \
+}
+
+/* RTAS_PCI_WRITE_OP(size, type, nbytes): write-side counterpart of
+ * RTAS_PCI_READ_OP.  Expands to rtas_write_config_<size>() (keyed by
+ * device_node, choosing the ibm,write-pci-config or write-pci-config
+ * token by whether the PHB has a BUID; no fake fallback on error) and
+ * the rtas_pci_write_config_<size>() pci_ops wrapper, which returns the
+ * raw rc rather than a PCIBIOS_* code.
+ */
+#define RTAS_PCI_WRITE_OP(size, type, nbytes) \
+int __chrp \
+rtas_write_config_##size(struct device_node *dn, int offset, type val) { \
+ unsigned long buid; \
+ unsigned int addr; \
+ int ret; \
+ \
+ if (dn == NULL) { \
+ ret = -2; \
+ } else if (dn->status) { \
+ ret = -1; \
+ } else { \
+ buid = dn->phb->buid; \
+ addr = (dn->busno << 16) | (dn->devfn << 8) | offset; \
+ if (buid) { \
+ ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, nbytes, (ulong) val); \
+ } else { \
+ ret = rtas_call(write_pci_config, 3, 1, NULL, addr, nbytes, (ulong)val); \
+ } \
+ } \
+ return ret; \
+} \
+int __chrp \
+rtas_pci_write_config_##size(struct pci_dev *dev, int offset, type val) { \
+ return rtas_write_config_##size(pci_device_to_OF_node(dev), offset, val); \
+}
+
+/* Instantiate the byte/word/dword config-space accessors. */
+RTAS_PCI_READ_OP(byte, u8 *, 1)
+RTAS_PCI_READ_OP(word, u16 *, 2)
+RTAS_PCI_READ_OP(dword, u32 *, 4)
+RTAS_PCI_WRITE_OP(byte, u8, 1)
+RTAS_PCI_WRITE_OP(word, u16, 2)
+RTAS_PCI_WRITE_OP(dword, u32, 4)
+
+/* pci_ops vector handed to the generic PCI layer (positional
+ * initializers: read byte/word/dword then write byte/word/dword).
+ */
+struct pci_ops rtas_pci_ops = {
+ rtas_pci_read_config_byte,
+ rtas_pci_read_config_word,
+ rtas_pci_read_config_dword,
+ rtas_pci_write_config_byte,
+ rtas_pci_write_config_word,
+ rtas_pci_write_config_dword,
+};
+
+/*
+ * Handle the case where rtas refuses to do a pci config read.
+ * This currently only happens with some PHBs in which case we totally fake
+ * out the values (and call it a speedwagaon -- something we could look up
+ * in the device tree).
+ */
+/* Fabricate a config-space read result when RTAS refuses the real one
+ * (used for some PHBs / EADS bridges).  Fills *returnval with a
+ * plausible value for the requested register (default all-ones) and
+ * returns 0, or -3 if dn is not a pci bridge/phb node.
+ * A "class-code" property distinguishes an EADS pci bridge from a PHB.
+ */
+static int
+rtas_fake_read(struct device_node *dn, int offset, int nbytes, unsigned long *returnval)
+{
+ char *device_type = (char *)get_property(dn, "device_type", 0);
+ u32 *class_code = (u32 *)get_property(dn, "class-code", 0);
+
+ *returnval = ~0; /* float by default */
+
+ /* udbg_printf("rtas_fake_read dn=%p, offset=0x%02x, nbytes=%d, device_type=%s\n", dn, offset, nbytes, device_type ? device_type : "<none>"); */
+ if (device_type && strcmp(device_type, "pci") != 0)
+ return -3; /* Not a phb or bridge */
+
+ /* NOTE: class_code != NULL => EADS pci bridge. Else a PHB */
+ if (nbytes == 1) {
+ if (offset == PCI_HEADER_TYPE)
+ *returnval = 0x80; /* multifunction */
+ else if (offset == PCI_INTERRUPT_PIN || offset == PCI_INTERRUPT_LINE)
+ *returnval = 0;
+ } else if (nbytes == 2) {
+ if (offset == PCI_SUBSYSTEM_VENDOR_ID || offset == PCI_SUBSYSTEM_ID)
+ *returnval = 0;
+ else if (offset == PCI_COMMAND)
+ *returnval = PCI_COMMAND_PARITY|PCI_COMMAND_MASTER|PCI_COMMAND_MEMORY;
+ } else if (nbytes == 4) {
+ if (offset == PCI_VENDOR_ID)
+ *returnval = 0x1014 | ((class_code ? 0x8b : 0x102) << 16); /* a phb */
+ else if (offset == PCI_REVISION_ID)
+ *returnval = (class_code ? PCI_CLASS_BRIDGE_PCI : PCI_CLASS_BRIDGE_HOST) << 16; /* revs are zero */
+ else if ((offset >= PCI_BASE_ADDRESS_0 && offset <= PCI_BASE_ADDRESS_5) || offset == PCI_ROM_ADDRESS)
+ *returnval = 0;
+ }
+
+ /* printk("fake: %s nbytes=%d, offset=%lx ret=%lx\n", class_code ? "EADS" : "PHB", nbytes, offset, *returnval); */
+ return 0;
+}
+
+/******************************************************************
+ * pci_read_irq_line
+ *
+ * Reads the Interrupt Pin to determine if interrupt is use by card.
+ * If the interrupt is used, then gets the interrupt line from the
+ * openfirmware and sets it in the pci_dev and pci_config line.
+ *
+ ******************************************************************/
+/* Determine the irq for Pci_Dev from its OF node and program it into
+ * both pci_dev->irq and the device's PCI_INTERRUPT_LINE register.
+ * Returns 0 on success (including "device uses no interrupt"),
+ * -1 when the OF node or its interrupt list is missing.
+ */
+int
+pci_read_irq_line(struct pci_dev *Pci_Dev)
+{
+ u8 InterruptPin;
+ struct device_node *Node;
+
+ pci_read_config_byte(Pci_Dev, PCI_INTERRUPT_PIN, &InterruptPin);
+ if (InterruptPin == 0) {
+ PPCDBG(PPCDBG_BUSWALK,"\tDevice: %s No Interrupt used by device.\n",Pci_Dev->slot_name);
+ return 0;
+ }
+ Node = pci_device_to_OF_node(Pci_Dev);
+ if ( Node == NULL) {
+ PPCDBG(PPCDBG_BUSWALK,"\tDevice: %s Device Node not found.\n",Pci_Dev->slot_name);
+ return -1;
+ }
+ if (Node->n_intrs == 0) {
+ PPCDBG(PPCDBG_BUSWALK,"\tDevice: %s No Device OF interrupts defined.\n",Pci_Dev->slot_name);
+ return -1;
+ }
+ Pci_Dev->irq = Node->intrs[0].line;
+
+ /* S7A platform quirk: interrupt lines above 16 are offset by 3. */
+ if (s7a_workaround) {
+ if (Pci_Dev->irq > 16)
+ Pci_Dev->irq -= 3;
+ }
+
+ pci_write_config_byte(Pci_Dev, PCI_INTERRUPT_LINE, Pci_Dev->irq);
+
+ PPCDBG(PPCDBG_BUSWALK,"\tDevice: %s pci_dev->irq = 0x%02X\n",Pci_Dev->slot_name,Pci_Dev->irq);
+ return 0;
+}
+
+/******************************************************************
+ * Find all PHBs in the system and initialize a set of data
+ * structures to represent them.
+ ******************************************************************/
+/* Walk the device tree for "pci" host-bridge nodes, allocate and set up
+ * a pci_controller (phb) for each via alloc_phb(), and parse each
+ * node's "ranges" property into io/mem resources and offsets.
+ * Also resolves the RTAS config-space tokens and (optionally) EEH and
+ * OpenPIC ISU setup.  Returns 0 on success, -1 on failure.
+ */
+unsigned long __init
+find_and_init_phbs(void)
+{
+ struct device_node *Pci_Node;
+ struct pci_controller *phb;
+ unsigned int root_addr_size_words = 0, this_addr_size_words = 0;
+ unsigned int this_addr_count = 0, range_stride;
+ unsigned int *ui_ptr = NULL, *ranges;
+ char *model;
+ struct pci_range64 range;
+ struct resource *res;
+ unsigned int memno, rlen, i, index;
+ unsigned int *opprop;
+ int has_isa = 0;
+ PPCDBG(PPCDBG_PHBINIT, "find_and_init_phbs\n");
+
+ read_pci_config = rtas_token("read-pci-config");
+ write_pci_config = rtas_token("write-pci-config");
+ ibm_read_pci_config = rtas_token("ibm,read-pci-config");
+ ibm_write_pci_config = rtas_token("ibm,write-pci-config");
+#ifdef CONFIG_PPC_EEH
+ eeh_init();
+#endif
+
+ if (naca->interrupt_controller == IC_OPEN_PIC) {
+ opprop = (unsigned int *)get_property(find_path_device("/"),
+ "platform-open-pic", NULL);
+ }
+
+ /* Get the root address word size. */
+ ui_ptr = (unsigned int *) get_property(find_path_device("/"),
+ "#size-cells", NULL);
+ if (ui_ptr) {
+ root_addr_size_words = *ui_ptr;
+ } else {
+ PPCDBG(PPCDBG_PHBINIT, "\tget #size-cells failed.\n");
+ return(-1);
+ }
+
+ if (find_type_devices("isa")) {
+ has_isa = 1;
+ PPCDBG(PPCDBG_PHBINIT, "\tFound an ISA bus.\n");
+ }
+
+ index = 0;
+
+ /******************************************************************
+ * Find all PHB devices and create an object for them.
+ ******************************************************************/
+ for (Pci_Node = find_devices("pci"); Pci_Node != NULL; Pci_Node = Pci_Node->next) {
+ model = (char *) get_property(Pci_Node, "model", NULL);
+ if (model != NULL) {
+ phb = alloc_phb(Pci_Node, model, root_addr_size_words);
+ if (phb == NULL) return(-1);
+ }
+ else {
+ continue;
+ }
+
+ /* Get this node's address word size. */
+ ui_ptr = (unsigned int *) get_property(Pci_Node, "#size-cells", NULL);
+ if (ui_ptr)
+ this_addr_size_words = *ui_ptr;
+ else
+ this_addr_size_words = 1;
+ /* Get this node's address word count. */
+ ui_ptr = (unsigned int *) get_property(Pci_Node, "#address-cells", NULL);
+ if (ui_ptr)
+ this_addr_count = *ui_ptr;
+ else
+ this_addr_count = 3;
+
+ /* Cells per "ranges" entry: child addr + parent addr + size. */
+ range_stride = this_addr_count + root_addr_size_words + this_addr_size_words;
+
+ memno = 0;
+ phb->io_base_phys = 0;
+
+ ranges = (unsigned int *) get_property(Pci_Node, "ranges", &rlen);
+ PPCDBG(PPCDBG_PHBINIT, "\trange_stride = 0x%lx, rlen = 0x%x\n", range_stride, rlen);
+
+ for (i = 0; i < (rlen/sizeof(*ranges)); i+=range_stride) {
+ /* Put the PCI addr part of the current element into a
+ * '64' struct.
+ */
+ range = *((struct pci_range64 *)(ranges + i));
+
+ /* If this is a '32' element, map into a 64 struct. */
+ if ((range_stride * sizeof(int)) ==
+ sizeof(struct pci_range32)) {
+ range.parent_addr =
+ (unsigned long)(*(ranges + i + 3));
+ range.size =
+ (((unsigned long)(*(ranges + i + 4)))<<32) |
+ (*(ranges + i + 5));
+ } else {
+ range.parent_addr =
+ (((unsigned long)(*(ranges + i + 3)))<<32) |
+ (*(ranges + i + 4));
+ range.size =
+ (((unsigned long)(*(ranges + i + 5)))<<32) |
+ (*(ranges + i + 6));
+ }
+
+ PPCDBG(PPCDBG_PHBINIT, "\trange.parent_addr = 0x%lx\n",
+ range.parent_addr);
+ PPCDBG(PPCDBG_PHBINIT, "\trange.child_addr.hi = 0x%lx\n",
+ range.child_addr.a_hi);
+ PPCDBG(PPCDBG_PHBINIT, "\trange.child_addr.mid = 0x%lx\n",
+ range.child_addr.a_mid);
+ PPCDBG(PPCDBG_PHBINIT, "\trange.child_addr.lo = 0x%lx\n",
+ range.child_addr.a_lo);
+ PPCDBG(PPCDBG_PHBINIT, "\trange.size = 0x%lx\n",
+ range.size);
+
+ res = NULL;
+ /* Bits 24-25 of the child address select the space type. */
+ switch ((range.child_addr.a_hi >> 24) & 0x3) {
+ case 1: /* I/O space */
+ PPCDBG(PPCDBG_PHBINIT, "\tIO Space\n");
+ phb->io_base_phys = range.parent_addr;
+ res = &phb->io_resource;
+ res->name = Pci_Node->full_name;
+ res->flags = IORESOURCE_IO;
+#ifdef CONFIG_PPC_EEH
+ if (!isa_io_base && has_isa) {
+ /* map a page for ISA ports. Not EEH protected. */
+ isa_io_base = (unsigned long)__ioremap(phb->io_base_phys, PAGE_SIZE, _PAGE_NO_CACHE);
+ }
+ res->start = phb->io_base_virt = eeh_token(index, 0, 0, 0);
+ res->end = eeh_token(index, 0xff, 0xff, 0xffffffff);
+#else
+ phb->io_base_virt = ioremap(phb->io_base_phys, range.size);
+ if (!pci_io_base) {
+ pci_io_base = (unsigned long)phb->io_base_virt;
+ if (has_isa)
+ isa_io_base = pci_io_base;
+ }
+ res->start = ((((unsigned long) range.child_addr.a_mid) << 32) | (range.child_addr.a_lo));
+ res->start += (unsigned long)phb->io_base_virt;
+ res->end = res->start + range.size - 1;
+#endif
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+ phb->pci_io_offset = range.parent_addr -
+ ((((unsigned long)
+ range.child_addr.a_mid) << 32) |
+ (range.child_addr.a_lo));
+ PPCDBG(PPCDBG_PHBINIT, "\tpci_io_offset = 0x%lx\n",
+ phb->pci_io_offset);
+ break;
+ case 2: /* mem space */
+ PPCDBG(PPCDBG_PHBINIT, "\tMem Space\n");
+ phb->pci_mem_offset = range.parent_addr -
+ ((((unsigned long)
+ range.child_addr.a_mid) << 32) |
+ (range.child_addr.a_lo));
+ PPCDBG(PPCDBG_PHBINIT, "\tpci_mem_offset = 0x%lx\n",
+ phb->pci_mem_offset);
+ if (memno < sizeof(phb->mem_resources)/sizeof(phb->mem_resources[0])) {
+ res = &(phb->mem_resources[memno]);
+ ++memno;
+ res->name = Pci_Node->full_name;
+ res->flags = IORESOURCE_MEM;
+#ifdef CONFIG_PPC_EEH
+ res->start = eeh_token(index, 0, 0, 0);
+ res->end = eeh_token(index, 0xff, 0xff, 0xffffffff);
+#else
+ res->start = range.parent_addr;
+ res->end = range.parent_addr + range.size - 1;
+#endif
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+ }
+ break;
+ }
+ }
+ PPCDBG(PPCDBG_PHBINIT, "\tphb->io_base_phys = 0x%lx\n",
+ phb->io_base_phys);
+ PPCDBG(PPCDBG_PHBINIT, "\tphb->pci_mem_offset = 0x%lx\n",
+ phb->pci_mem_offset);
+
+ if (naca->interrupt_controller == IC_OPEN_PIC) {
+ /* NOTE(review): assumes opprop was found above when the
+  * controller is OpenPIC -- a missing "platform-open-pic"
+  * property would dereference an uninitialized pointer.
+  */
+ int addr = root_addr_size_words * (index + 2) - 1;
+ openpic_setup_ISU(index, opprop[addr]);
+ }
+ index++;
+ }
+ pci_devs_phb_init();
+ return 0; /*Success */
+}
+
+/******************************************************************
+ *
+ * Allocate and partially initialize a structure to represent a PHB.
+ *
+ ******************************************************************/
+/* Allocate and partially initialize a pci_controller for the PHB
+ * device node `dev`, keyed on its "model" string (Python or
+ * Speedwagon register layouts are known).  Maps the chip's config
+ * and register windows, records the bus range, and on "big bus"
+ * systems folds the fw-phb-id BUID into the bus numbers.
+ * Returns the phb, or NULL on any failure.
+ */
+struct pci_controller *
+alloc_phb(struct device_node *dev, char *model, unsigned int addr_size_words)
+{
+ struct pci_controller *phb;
+ unsigned int *ui_ptr = NULL, len;
+ struct reg_property64 reg_struct;
+ int *bus_range;
+ int *buid_vals;
+
+ PPCDBG(PPCDBG_PHBINIT, "alloc_phb: %s\n", dev->full_name);
+ PPCDBG(PPCDBG_PHBINIT, "\tdev = 0x%lx\n", dev);
+ PPCDBG(PPCDBG_PHBINIT, "\tmodel = 0x%lx\n", model);
+ PPCDBG(PPCDBG_PHBINIT, "\taddr_size_words = 0x%lx\n", addr_size_words);
+
+ /* Found a PHB, now figure out where his registers are mapped. */
+ ui_ptr = (unsigned int *) get_property(dev, "reg", &len);
+ if (ui_ptr == NULL) {
+ PPCDBG(PPCDBG_PHBINIT, "\tget reg failed.\n");
+ return(NULL);
+ }
+
+ /* Widen a 32-bit reg property to the 64-bit struct. */
+ if (addr_size_words == 1) {
+ reg_struct.address = ((struct reg_property32 *)ui_ptr)->address;
+ reg_struct.size = ((struct reg_property32 *)ui_ptr)->size;
+ } else {
+ reg_struct = *((struct reg_property64 *)ui_ptr);
+ }
+
+ PPCDBG(PPCDBG_PHBINIT, "\treg_struct.address = 0x%lx\n", reg_struct.address);
+ PPCDBG(PPCDBG_PHBINIT, "\treg_struct.size = 0x%lx\n", reg_struct.size);
+
+ /***************************************************************
+ * Set chip specific data in the phb, including types &
+ * register pointers.
+ ***************************************************************/
+
+ /****************************************************************
+ * Python
+ ***************************************************************/
+ if (strstr(model, "Python")) {
+ PPCDBG(PPCDBG_PHBINIT, "\tCreate python\n");
+ phb = pci_alloc_pci_controller("PHB PY",phb_type_python);
+ if (phb == NULL) return NULL;
+
+ phb->cfg_addr = (volatile unsigned long *)
+ ioremap(reg_struct.address + 0xf8000, PAGE_SIZE);
+ PPCDBG(PPCDBG_PHBINIT, "\tcfg_addr_r = 0x%lx\n",
+ reg_struct.address + 0xf8000);
+ PPCDBG(PPCDBG_PHBINIT, "\tcfg_addr_v = 0x%lx\n",
+ phb->cfg_addr);
+ phb->cfg_data = (char*)(phb->cfg_addr + 0x02);
+ phb->phb_regs = (volatile unsigned long *)
+ ioremap(reg_struct.address + 0xf7000, PAGE_SIZE);
+ /* Python's register file is 1 MB in size. */
+ phb->chip_regs = ioremap(reg_struct.address & ~(0xfffffUL),
+ 0x100000);
+
+ /*
+ * Firmware doesn't always clear this bit which is critical
+ * for good performance - Anton
+ */
+ {
+ volatile u32 *tmp, i;
+
+#define PRG_CL_RESET_VALID 0x00010000
+
+ tmp = (u32 *)((unsigned long)phb->chip_regs + 0xf6030);
+
+ if (*tmp & PRG_CL_RESET_VALID) {
+ printk("Python workaround: ");
+ *tmp &= ~PRG_CL_RESET_VALID;
+ /*
+ * We must read it back for changes to
+ * take effect
+ */
+ i = *tmp;
+ printk("reg0: %x\n", i);
+ }
+ }
+
+ /***************************************************************
+ * Speedwagon
+ ***************************************************************/
+ } else if (strstr(model, "Speedwagon")) {
+ PPCDBG(PPCDBG_PHBINIT, "\tCreate speedwagon\n");
+ phb = pci_alloc_pci_controller("PHB SW",phb_type_speedwagon);
+ if (phb == NULL) return NULL;
+
+ if (_machine == _MACH_pSeries) {
+ phb->cfg_addr = (volatile unsigned long *)
+ ioremap(reg_struct.address + 0x140, PAGE_SIZE);
+ phb->cfg_data = (char*)(phb->cfg_addr - 0x02); /* minus is correct */
+ phb->phb_regs = (volatile unsigned long *)
+ ioremap(reg_struct.address, PAGE_SIZE);
+ /* Speedwagon's register file is 1 MB in size. */
+ phb->chip_regs = ioremap(reg_struct.address & ~(0xfffffUL),
+ 0x100000);
+ PPCDBG(PPCDBG_PHBINIT, "\tmapping chip_regs from 0x%lx -> 0x%lx\n",
+ reg_struct.address & 0xfffff, phb->chip_regs);
+ } else {
+ phb->cfg_addr = NULL;
+ phb->cfg_data = NULL;
+ phb->phb_regs = NULL;
+ phb->chip_regs = NULL;
+ }
+
+ phb->local_number = ((reg_struct.address >> 12) & 0xf) - 0x8;
+ /***************************************************************
+ * Trying to build an unknown PHB type just gets the code in
+ * trouble, so refuse it.
+ ***************************************************************/
+ } else {
+ PPCDBG(PPCDBG_PHBINIT, "\tUnknown PHB Type!\n");
+ printk("PCI: Unknown Phb Type!\n");
+ return NULL;
+ }
+
+ bus_range = (int *) get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ PPCDBG(PPCDBG_PHBINIT, "Can't get bus-range for %s\n", dev->full_name);
+ kfree(phb);
+ return(NULL);
+ }
+
+ /***************************************************************
+ * Finished with the initialization
+ ***************************************************************/
+ phb->first_busno = bus_range[0];
+ phb->last_busno = bus_range[1];
+
+ phb->arch_data = dev;
+ phb->ops = &rtas_pci_ops;
+
+ buid_vals = (int *) get_property(dev, "ibm,fw-phb-id", &len);
+ if (buid_vals == NULL || len < 2 * sizeof(int)) {
+ phb->buid = 0;
+ } else {
+ /* Big bus system. These systems start new bus numbers under
+ * each phb. Until pci domains are standard, we depend on a
+ * patch which makes bus numbers ints and we shift the phb
+ * number into the upper bits.
+ */
+ struct pci_bus check;
+ if (sizeof(check.number) == 1 || sizeof(check.primary) == 1 ||
+ sizeof(check.secondary) == 1 || sizeof(check.subordinate) == 1) {
+ udbg_printf("pSeries_pci: this system has large bus numbers and the kernel was not\n"
+ "built with the patch that fixes include/linux/pci.h struct pci_bus so\n"
+ "number, primary, secondary and subordinate are ints.\n");
+ panic("pSeries_pci: this system has large bus numbers and the kernel was not\n"
+ "built with the patch that fixes include/linux/pci.h struct pci_bus so\n"
+ "number, primary, secondary and subordinate are ints.\n");
+ }
+ phb->buid = (((unsigned long)buid_vals[0]) << 32UL) |
+ (((unsigned long)buid_vals[1]) & 0xffffffff);
+ phb->first_busno += (phb->global_number << 8);
+ phb->last_busno += (phb->global_number << 8);
+ }
+
+ /* Dump PHB information for Debug */
+ PPCDBGCALL(PPCDBG_PHBINIT,dumpPci_Controller(phb) );
+
+ return phb;
+}
+
+/* Per-device resource fixup: translate the device's BAR resources
+ * from bus addresses to CPU addresses using the PHB's io/mem offsets
+ * (or, with CONFIG_PPC_EEH, to EEH tokens over ioremapped regions),
+ * prefix the device name with its IBM location code, and zap the
+ * bogus I/O resource of the winbond chip's second function.
+ */
+void
+fixup_resources(struct pci_dev *dev)
+{
+ int i;
+ struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
+#ifdef CONFIG_PPC_EEH
+ struct device_node *dn;
+ unsigned long eeh_disable_bit;
+
+ /* Add IBM loc code (slot) as a prefix to the device names for service */
+ dn = pci_device_to_OF_node(dev);
+ if (dn) {
+ char *loc_code = get_property(dn, "ibm,loc-code", 0);
+ if (loc_code) {
+ int loc_len = strlen(loc_code);
+ if (loc_len < sizeof(dev->name)) {
+ /* Shift the name right and splice in "<loc-code> ". */
+ memmove(dev->name+loc_len+1, dev->name, sizeof(dev->name)-loc_len-1);
+ memcpy(dev->name, loc_code, loc_len);
+ dev->name[loc_len] = ' ';
+ dev->name[sizeof(dev->name)-1] = '\0';
+ }
+ }
+ }
+
+ if (is_eeh_configured(dev)) {
+ eeh_disable_bit = 0;
+ printk("PCI: eeh configured for %s %s\n", dev->slot_name, dev->name);
+ if (eeh_set_option(dev, EEH_ENABLE) != 0) {
+ printk("PCI: failed to enable eeh for %s %s\n", dev->slot_name, dev->name);
+ eeh_disable_bit = EEH_TOKEN_DISABLED;
+ }
+ } else {
+ /* Assume device is by default EEH_DISABLE'd */
+ printk("PCI: eeh NOT configured for %s %s\n", dev->slot_name, dev->name);
+ eeh_disable_bit = EEH_TOKEN_DISABLED;
+ }
+#endif
+
+ PPCDBG(PPCDBG_PHBINIT, "fixup_resources:\n");
+ PPCDBG(PPCDBG_PHBINIT, "\tphb = 0x%016LX\n", phb);
+ PPCDBG(PPCDBG_PHBINIT, "\tphb->pci_io_offset = 0x%016LX\n", phb->pci_io_offset);
+ PPCDBG(PPCDBG_PHBINIT, "\tphb->pci_mem_offset = 0x%016LX\n", phb->pci_mem_offset);
+
+ PPCDBG(PPCDBG_PHBINIT, "\tdev->name = %s\n", dev->name);
+ PPCDBG(PPCDBG_PHBINIT, "\tdev->vendor:device = 0x%04X : 0x%04X\n", dev->vendor, dev->device);
+
+ /* NOTE(review): phb is dereferenced by the PPCDBG lines above before
+  * this NULL check -- harmless only if PPCDBG does not expand its
+  * arguments when the debug flag is off; confirm.
+  */
+ if (phb == NULL)
+ return;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) {
+ PPCDBG(PPCDBG_PHBINIT, "\tdevice %x.%x[%d] (flags %x) [%lx..%lx]\n",
+ dev->bus->number, dev->devfn, i,
+ dev->resource[i].flags,
+ dev->resource[i].start,
+ dev->resource[i].end);
+
+ if ((dev->resource[i].start == 0) && (dev->resource[i].end == 0)) {
+ continue;
+ }
+
+ if (dev->resource[i].flags & IORESOURCE_IO) {
+#ifdef CONFIG_PPC_EEH
+ unsigned int busno = dev->bus ? dev->bus->number : 0;
+ unsigned long size = dev->resource[i].end - dev->resource[i].start;
+ unsigned long addr = (unsigned long)__ioremap(dev->resource[i].start + phb->io_base_phys, size, _PAGE_NO_CACHE);
+ if (!addr)
+ panic("fixup_resources: ioremap failed!\n");
+ dev->resource[i].start = eeh_token(phb->global_number, busno, dev->devfn, addr) | eeh_disable_bit;
+ dev->resource[i].end = dev->resource[i].start + size;
+#else
+ unsigned long offset = (unsigned long)phb->io_base_virt;
+ dev->resource[i].start += offset;
+ dev->resource[i].end += offset;
+#endif
+ PPCDBG(PPCDBG_PHBINIT, "\t\t-> now [%lx .. %lx]\n",
+ dev->resource[i].start, dev->resource[i].end);
+ } else if (dev->resource[i].flags & IORESOURCE_MEM) {
+ if (dev->resource[i].start == 0) {
+ /* Bogus. Probably an unused bridge. */
+ dev->resource[i].end = 0;
+ } else {
+#ifdef CONFIG_PPC_EEH
+ unsigned int busno = dev->bus ? dev->bus->number : 0;
+ unsigned long size = dev->resource[i].end - dev->resource[i].start;
+ unsigned long addr = (unsigned long)__ioremap(dev->resource[i].start + phb->pci_mem_offset, size, _PAGE_NO_CACHE);
+ if (!addr)
+ panic("fixup_resources: ioremap failed!\n");
+ dev->resource[i].start = eeh_token(phb->global_number, busno, dev->devfn, addr) | eeh_disable_bit;
+ dev->resource[i].end = dev->resource[i].start + size;
+#else
+ dev->resource[i].start += phb->pci_mem_offset;
+ dev->resource[i].end += phb->pci_mem_offset;
+#endif
+ }
+ PPCDBG(PPCDBG_PHBINIT, "\t\t-> now [%lx..%lx]\n",
+ dev->resource[i].start, dev->resource[i].end);
+
+ } else {
+ continue;
+ }
+
+ /* zap the 2nd function of the winbond chip */
+ if (dev->resource[i].flags & IORESOURCE_IO
+ && dev->bus->number == 0 && dev->devfn == 0x81)
+ dev->resource[i].flags &= ~IORESOURCE_IO;
+ }
+}
+
+/* Set s7a_workaround if the root node's model is "IBM,7013-S7A";
+ * pci_read_irq_line() then shifts high interrupt lines down by 3.
+ */
+static void check_s7a(void)
+{
+ struct device_node *root;
+ char *model;
+
+ root = find_path_device("/");
+ if (root) {
+ model = get_property(root, "model", NULL);
+ if (model && !strcmp(model, "IBM,7013-S7A"))
+ s7a_workaround = 1;
+ }
+}
+
+/* Late PCI fixup for pSeries: apply the S7A quirk check, read and
+ * program the irq line of every PCI device, and initialize the XICS
+ * ISA interrupts when that is the platform's interrupt controller.
+ */
+void __init
+pSeries_pcibios_fixup(void)
+{
+ struct pci_dev *dev;
+
+ PPCDBG(PPCDBG_PHBINIT, "pSeries_pcibios_fixup: start\n");
+ pci_assign_all_busses = 0;
+
+ check_s7a();
+
+ pci_for_each_dev(dev) {
+ pci_read_irq_line(dev);
+ PPCDBGCALL(PPCDBG_PHBINIT, dumpPci_Dev(dev) );
+ }
+
+ if (naca->interrupt_controller == IC_PPC_XIC) {
+ xics_isa_init();
+ }
+}
+
+/***********************************************************************
+ * pci_find_hose_for_OF_device
+ *
+ * This function finds the PHB that matches the given OpenFirmware
+ * device_node by scanning all the pci_controllers.
+ *
+ ***********************************************************************/
+struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node *node)
+{
+ struct device_node *dn;
+ struct pci_controller *hose;
+
+ /* Walk up the OF tree; at each level scan every registered PHB
+  * for one whose arch_data is this node.
+  */
+ for (dn = node; dn != NULL; dn = dn->parent) {
+ for (hose = hose_head; hose != NULL; hose = hose->next) {
+ if (hose->arch_data == dn)
+ return hose;
+ }
+ }
+ return NULL;
+}
+
+/***********************************************************************
+ * find_floppy(void)
+ *
+ * Finds the default floppy device, if the system has one, and returns
+ * the pci_dev for the isa bridge for the floppy device.
+ *
+ * Note: This function finds the first "fdc" device and then looks at
+ * the parent device, which should be the isa bridge device. If there
+ * is more than one floppy on the system, it will find the first one
+ * and maybe that is okay.
+ ***********************************************************************/
+/* Locate the first "fdc" (floppy controller) node in the device tree
+ * and return the pci_dev of its parent (expected to be the ISA
+ * bridge), or NULL when no floppy is present.
+ * The parent's first "reg" cell encodes the bus number in bits 16-23
+ * and the devfn in bits 8-15, as decoded below.
+ */
+struct pci_dev*
+find_floppy(void)
+{
+ struct device_node *floppy_dn;
+ struct pci_dev *floppy_dev = NULL;
+ unsigned int *reg; /* was "int *": mismatched the (unsigned int *) cast below */
+
+ floppy_dn = find_type_devices("fdc");
+ if (floppy_dn && floppy_dn->parent) {
+ if ((reg = (unsigned int *)get_property(floppy_dn->parent,"reg", 0)) != NULL)
+ floppy_dev = pci_find_slot((reg[0] & 0x00ff0000) >> 16, (reg[0] & 0x0000ff00) >> 8);
+ }
+ PPCDBG(PPCDBG_BUSWALK,"\tFloppy pci_dev\n");
+ PPCDBGCALL(PPCDBG_BUSWALK, dumpPci_Dev(floppy_dev) );
+ return floppy_dev;
+}
+
+/***********************************************************************
+ * pSeries_pcibios_init
+ *
+ * Chance to initialize any structures or variables before the PCI bus walk.
+ *
+ ***********************************************************************/
+/* Pre-bus-walk initialization for pSeries: detect the "ibm,fw-phb-id"
+ * RTAS property and flag a large-bus-system configuration if present.
+ */
+void
+pSeries_pcibios_init(void)
+{
+ PPCDBG(PPCDBG_PHBINIT, "\tppc64_pcibios_init Entry.\n");
+
+ if (get_property(find_path_device("/rtas"),"ibm,fw-phb-id",NULL) != NULL) {
+ PPCDBG(PPCDBG_PHBINIT, "\tFound: ibm,fw-phb-id\n");
+ Pci_Large_Bus_System = 1;
+ }
+}
+
+/*
+ * This is called very early before the page table is setup.
+ */
+/* Install the RTAS-based PCI config-space accessors into ppc_md.
+ * Called very early, before the page table is set up.
+ */
+void
+pSeries_pcibios_init_early(void)
+{
+ ppc_md.pcibios_read_config_byte = rtas_read_config_byte;
+ ppc_md.pcibios_read_config_word = rtas_read_config_word;
+ ppc_md.pcibios_read_config_dword = rtas_read_config_dword;
+ ppc_md.pcibios_write_config_byte = rtas_write_config_byte;
+ ppc_md.pcibios_write_config_word = rtas_write_config_word;
+ ppc_md.pcibios_write_config_dword = rtas_write_config_dword;
+}
+/************************************************************************/
+/* Get a char* of the device physical location(U0.3-P1-I8) */
+/* See the Product Topology in the RS/6000 Architecture. */
+/************************************************************************/
+/* Format the device's bus/slot/vendor and its "ibm,loc-code" physical
+ * location into BufPtr; returns the number of characters written.
+ * NOTE(review): get_property() may return NULL for a missing
+ * "ibm,loc-code", which is then passed to %s -- confirm callers only
+ * use this on nodes that carry the property.
+ */
+int device_Location(struct pci_dev *PciDev, char *BufPtr)
+{
+ struct device_node *DevNode = (struct device_node *)PciDev->sysdata;
+ return sprintf(BufPtr,"PCI: Bus%3d, Device%3d, Vendor %04X, Location %-12s",
+ PciDev->bus->number,
+ PCI_SLOT(PciDev->devfn),
+ PciDev->vendor,
+ (char*)get_property(DevNode,"ibm,loc-code",0));
+}
+/************************************************************************/
+/* Set the slot reset line to the state passed in. */
+/* This is the platform specific for code for the pci_reset_device */
+/* function. */
+/************************************************************************/
+/* Platform hook for pci_reset_device: drive the slot reset line to
+ * 'state'. Not implemented on this platform; always returns -1.
+ */
+int pci_set_reset(struct pci_dev *PciDev, int state)
+{
+ return -1;
+}
--- /dev/null
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define __KERNEL__ 1
+#include <asm/types.h>
+#include <asm/page.h>
+#include <stddef.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/Paca.h>
+
+
+/* The Paca is an array with one entry per processor. Each contains an
+ * ItLpPaca, which contains the information shared between the
+ * hypervisor and Linux. Each also contains an ItLpRegSave area which
+ * is used by the hypervisor to save registers.
+ * On systems with hardware multi-threading, there are two threads
+ * per processor. The Paca array must contain an entry for each thread.
+ * The VPD Areas will give a max logical processors = 2 * max physical
+ * processors. The processor VPD array needs one entry per physical
+ * processor (not thread).
+ */
+/* NOTE: uses GNU C labeled-element initializer syntax (field: value). */
+#define PACAINITDATA(number,start,lpq,asrr,asrv) \
+{ \
+ xLpPacaPtr: &xPaca[number].xLpPaca, \
+ xLpRegSavePtr: &xPaca[number].xRegSav, \
+ xPacaIndex: (number), /* Paca Index */ \
+ default_decr: 0x00ff0000, /* Initial Decr */ \
+ xStab_data: { \
+ real: (asrr), /* Real pointer to segment table */ \
+ virt: (asrv), /* Virt pointer to segment table */ \
+ next_round_robin: 1 /* Round robin index */ \
+ }, \
+ lpQueuePtr: (lpq), /* &xItLpQueue, */ \
+ xRtas: { \
+ lock: SPIN_LOCK_UNLOCKED \
+ }, \
+ xProcStart: (start), /* Processor start */ \
+ xLpPaca: { \
+ xDesc: 0xd397d781, /* "LpPa" */ \
+ xSize: sizeof(struct ItLpPaca), \
+ xFPRegsInUse: 1, \
+ xDynProcStatus: 2, \
+ xDecrVal: 0x00ff0000, \
+ xEndOfQuantum: 0xffffffffffffffff \
+ }, \
+ xRegSav: { \
+ xDesc: 0xd397d9e2, /* "LpRS" */ \
+ xSize: sizeof(struct ItLpRegSave) \
+ }, \
+ exception_sp: \
+ (&xPaca[number].exception_stack[0]) - EXC_FRAME_SIZE, \
+}
+
+/* Statically initialized Paca array. Entry 0 is the boot processor
+ * (xProcStart = 1); on iSeries its lpQueuePtr points at xItLpQueue,
+ * otherwise it gets real/virt segment table addresses directly.
+ */
+struct Paca xPaca[maxPacas] __page_aligned = {
+#ifdef CONFIG_PPC_ISERIES
+ PACAINITDATA( 0, 1, &xItLpQueue, 0, 0xc000000000005000),
+#else
+ PACAINITDATA( 0, 1, 0, 0x5000, 0xc000000000005000),
+#endif
+ PACAINITDATA( 1, 0, 0, 0, 0),
+ PACAINITDATA( 2, 0, 0, 0, 0),
+ PACAINITDATA( 3, 0, 0, 0, 0),
+ PACAINITDATA( 4, 0, 0, 0, 0),
+ PACAINITDATA( 5, 0, 0, 0, 0),
+ PACAINITDATA( 6, 0, 0, 0, 0),
+ PACAINITDATA( 7, 0, 0, 0, 0),
+ PACAINITDATA( 8, 0, 0, 0, 0),
+ PACAINITDATA( 9, 0, 0, 0, 0),
+ PACAINITDATA(10, 0, 0, 0, 0),
+ PACAINITDATA(11, 0, 0, 0, 0),
+ PACAINITDATA(12, 0, 0, 0, 0),
+ PACAINITDATA(13, 0, 0, 0, 0),
+ PACAINITDATA(14, 0, 0, 0, 0),
+ PACAINITDATA(15, 0, 0, 0, 0),
+ PACAINITDATA(16, 0, 0, 0, 0),
+ PACAINITDATA(17, 0, 0, 0, 0),
+ PACAINITDATA(18, 0, 0, 0, 0),
+ PACAINITDATA(19, 0, 0, 0, 0),
+ PACAINITDATA(20, 0, 0, 0, 0),
+ PACAINITDATA(21, 0, 0, 0, 0),
+ PACAINITDATA(22, 0, 0, 0, 0),
+ PACAINITDATA(23, 0, 0, 0, 0),
+ PACAINITDATA(24, 0, 0, 0, 0),
+ PACAINITDATA(25, 0, 0, 0, 0),
+ PACAINITDATA(26, 0, 0, 0, 0),
+ PACAINITDATA(27, 0, 0, 0, 0),
+ PACAINITDATA(28, 0, 0, 0, 0),
+ PACAINITDATA(29, 0, 0, 0, 0),
+ PACAINITDATA(30, 0, 0, 0, 0),
+ PACAINITDATA(31, 0, 0, 0, 0),
+ PACAINITDATA(32, 0, 0, 0, 0),
+ PACAINITDATA(33, 0, 0, 0, 0),
+ PACAINITDATA(34, 0, 0, 0, 0),
+ PACAINITDATA(35, 0, 0, 0, 0),
+ PACAINITDATA(36, 0, 0, 0, 0),
+ PACAINITDATA(37, 0, 0, 0, 0),
+ PACAINITDATA(38, 0, 0, 0, 0),
+ PACAINITDATA(39, 0, 0, 0, 0),
+ PACAINITDATA(40, 0, 0, 0, 0),
+ PACAINITDATA(41, 0, 0, 0, 0),
+ PACAINITDATA(42, 0, 0, 0, 0),
+ PACAINITDATA(43, 0, 0, 0, 0),
+ PACAINITDATA(44, 0, 0, 0, 0),
+ PACAINITDATA(45, 0, 0, 0, 0),
+ PACAINITDATA(46, 0, 0, 0, 0),
+ PACAINITDATA(47, 0, 0, 0, 0)
+};
--- /dev/null
+/*
+ *
+ *
+ * Port for PPC64 David Engebretsen, IBM Corp.
+ * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/ppcdebug.h>
+#include <asm/Naca.h>
+#include <asm/pci_dma.h>
+#include <asm/machdep.h>
+#ifdef CONFIG_PPC_EEH
+#include <asm/eeh.h>
+#endif
+
+#include "pci.h"
+
+/* pci_io_base -- the base address from which io bars are offsets.
+ * This is the lowest I/O base address (so bar values are always positive),
+ * and it *must* be the start of ISA space if an ISA bus exists because
+ * ISA drivers use hard coded offsets. If no ISA bus exists a dummy
+ * page is mapped and isa_io_limit prevents access to it.
+ */
+unsigned long isa_io_base = 0; /* NULL if no ISA bus */
+unsigned long pci_io_base = 0;
+unsigned long isa_mem_base = 0;
+unsigned long pci_dram_offset = 0;
+
+/******************************************************************
+ * Forward declare of prototypes
+ ******************************************************************/
+static void pcibios_fixup_resources(struct pci_dev* dev);
+static void fixup_broken_pcnet32(struct pci_dev* dev);
+static void fixup_windbond_82c105(struct pci_dev* dev);
+void fixup_resources(struct pci_dev* dev);
+
+struct pci_dev *find_floppy(void);
+void iSeries_pcibios_init(void);
+void pSeries_pcibios_init(void);
+
+
+extern struct Naca *naca;
+
+int pci_assign_all_busses = 0;
+
+/* Singly-linked list of all registered PHBs (host bridges). */
+struct pci_controller* hose_head;
+struct pci_controller** hose_tail = &hose_head;
+
+/*******************************************************************
+ * Counters and control flags.
+ *******************************************************************/
+long Pci_Io_Read_Count = 0;
+long Pci_Io_Write_Count = 0;
+long Pci_Cfg_Read_Count = 0;
+long Pci_Cfg_Write_Count= 0;
+long Pci_Error_Count = 0;
+
+int Pci_Retry_Max = 3; /* Only retry 3 times */
+int Pci_Error_Flag = 1; /* Set Retry Error on. */
+int Pci_Trace_Flag = 0;
+
+/******************************************************************
+ * Global PHB bookkeeping.
+ ******************************************************************/
+int global_phb_number = 0; /* Global phb counter */
+int Pci_Large_Bus_System = 0;
+int Pci_Set_IOA_Address = 0;
+int Pci_Manage_Phb_Space = 0;
+struct pci_controller *phbtab[PCI_MAX_PHB];
+
+static int pci_bus_count;
+
+/* Floppy dev for ppc64_fd_dma_setup(). May be null if no floppy in the system. */
+struct pci_dev *ppc64_floppy_dev = NULL;
+
+/* Header fixups applied by the generic PCI layer to matching devices. */
+struct pci_fixup pcibios_fixups[] = {
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32 },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, fixup_windbond_82c105 },
+ { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
+ { 0 }
+};
+
+/* Fix pcnet32 Ethernet adapters that report the Trident vendor ID:
+ * rewrite the vendor to AMD both in the pci_dev and in config space,
+ * then re-run device naming.
+ */
+static void fixup_broken_pcnet32(struct pci_dev* dev)
+{
+ if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
+ dev->vendor = PCI_VENDOR_ID_AMD;
+ pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
+ pci_name_device(dev);
+ }
+}
+
+/* Quirk for the Winbond 82c105 IDE controller: set the LEGIRQ bit
+ * (bit 11) in config register 0x40 so the chip raises INTC rather
+ * than legacy ISA interrupts.
+ */
+static void fixup_windbond_82c105(struct pci_dev* dev)
+{
+ /* Assume the windbond 82c105 is the IDE controller on a
+ * p610. We should probably be more careful in case
+ * someone tries to plug in a similar adapter.
+ */
+ unsigned int reg;
+
+ printk("Using INTC for W82c105 IDE controller.\n");
+ /* Fixed mis-decoded "&reg" (text corruption turned it into '®'). */
+ pci_read_config_dword(dev, 0x40, &reg);
+ /* Enable LEGIRQ to use INTC instead of ISA interrupts */
+ pci_write_config_dword(dev, 0x40, reg | (1<<11));
+}
+
+
+/* No per-bus range fixups are needed on this platform; required only
+ * to satisfy the generic PCI code's expectation that the hook exists.
+ */
+void pcibios_fixup_pbus_ranges(struct pci_bus *pbus,
+ struct pbus_set_ranges_data *pranges)
+{
+}
+
+
+/* Write a device resource's new address back to the matching config
+ * space BAR (or the ROM base register), translating a memory address
+ * back into bus space by subtracting the hose's pci_mem_offset, then
+ * read it back to verify the device latched the value.
+ */
+void
+pcibios_update_resource(struct pci_dev *dev, struct resource *root,
+ struct resource *res, int resource)
+{
+ u32 new, check;
+ int reg;
+ struct pci_controller* hose = PCI_GET_PHB_PTR(dev);
+
+ new = res->start;
+ if (hose && res->flags & IORESOURCE_MEM)
+ new -= hose->pci_mem_offset;
+ /* Preserve the low flag bits the BAR carries alongside the address. */
+ new |= (res->flags & PCI_REGION_FLAG_MASK);
+ if (resource < 6) {
+ reg = PCI_BASE_ADDRESS_0 + 4*resource;
+ } else if (resource == PCI_ROM_RESOURCE) {
+ res->flags |= PCI_ROM_ADDRESS_ENABLE;
+ reg = dev->rom_base_reg;
+ } else {
+ /* Somebody might have asked allocation of a non-standard resource */
+ return;
+ }
+
+ pci_write_config_dword(dev, reg, new);
+ pci_read_config_dword(dev, reg, &check);
+ /* Compare only the address bits appropriate to the BAR's space. */
+ if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
+ printk(KERN_ERR "PCI: Error while updating region "
+ "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
+ new, check);
+ }
+}
+
+/* Generic-fixup-table entry point; delegates to the brand-specific
+ * fixup_resources() implementation.
+ */
+static void
+pcibios_fixup_resources(struct pci_dev* dev)
+{
+ fixup_resources(dev);
+}
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might have been mirrored at 0x0100-0x03ff..
+ */
+/* Resource-allocation alignment hook: round I/O region starts up to a
+ * 0x400 boundary when they would land in the 0x100-0x3ff window (see
+ * the comment above on 10-bit-decoding ISA cards); regions larger than
+ * 0x100 cannot be aligned this way and are reported.
+ */
+void
+pcibios_align_resource(void *data, struct resource *res, unsigned long size)
+{
+ struct pci_dev *dev = data;
+
+ if (res->flags & IORESOURCE_IO) {
+ unsigned long start = res->start;
+
+ if (size > 0x100) {
+ printk(KERN_ERR "PCI: Can not align I/O Region %s %s because size %ld is too large.\n",
+ dev->slot_name, res->name, size);
+ }
+
+ if (start & 0x300) {
+ start = (start + 0x3ff) & ~0x3ff;
+ res->start = start;
+ }
+ }
+}
+
+
+/*
+ * Handle resources of PCI devices. If the world were perfect, we could
+ * just allocate all the resource regions and do nothing more. It isn't.
+ * On the other hand, we cannot just re-allocate all devices, as it would
+ * require us to know lots of host bridge internals. So we attempt to
+ * keep as much of the original configuration as possible, but tweak it
+ * when it's found to be wrong.
+ *
+ * Known BIOS problems we have to work around:
+ * - I/O or memory regions not configured
+ * - regions configured, but not enabled in the command register
+ * - bogus I/O addresses above 64K used
+ * - expansion ROMs left enabled (this may sound harmless, but given
+ * the fact the PCI specs explicitly allow address decoders to be
+ * shared between expansion ROMs and other resource regions, it's
+ * at least dangerous)
+ *
+ * Our solution:
+ * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
+ * This gives us fixed barriers on where we can allocate.
+ * (2) Allocate resources for all enabled devices. If there is
+ * a collision, just mark the resource as unallocated. Also
+ * disable expansion ROMs during this step.
+ * (3) Try to allocate resources for disabled devices. If the
+ * resources were assigned correctly, everything goes well,
+ * if they weren't, they won't disturb allocation of other
+ * resources.
+ * (4) Assign new addresses to resources which were either
+ * not configured at all or misconfigured. If explicitly
+ * requested by the user, configure expansion ROM address
+ * as well.
+ */
+
+/* Recursively claim the window resources of every bridge in bus_list
+ * from their parent resources (ioport_resource/iomem_resource for root
+ * buses), logging any region that cannot be allocated. Transparent or
+ * undefined windows (pr == res) are skipped.
+ */
+static void __init
+pcibios_allocate_bus_resources(struct list_head *bus_list)
+{
+ struct list_head *ln;
+ struct pci_bus *bus;
+ int i;
+ struct resource *res, *pr;
+
+ /* Depth-First Search on bus tree */
+ for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
+ bus = pci_bus_b(ln);
+ for (i = 0; i < 4; ++i) {
+ if ((res = bus->resource[i]) == NULL || !res->flags)
+ continue;
+ if (bus->parent == NULL)
+ pr = (res->flags & IORESOURCE_IO)?
+ &ioport_resource: &iomem_resource;
+ else
+ pr = pci_find_parent_resource(bus->self, res);
+
+ if (pr == res)
+ continue; /* transparent bus or undefined */
+ if (pr && request_resource(pr, res) == 0)
+ continue;
+ printk(KERN_ERR "PCI: Cannot allocate resource region "
+ "%d of PCI bridge %x\n", i, bus->number);
+ printk(KERN_ERR "PCI: resource is %lx..%lx (%lx), parent %p\n",
+ res->start, res->end, res->flags, pr);
+ }
+ pcibios_allocate_bus_resources(&bus->children);
+ }
+}
+
+/* Claim device BAR resources in two passes: pass 0 handles devices
+ * whose decode is enabled in PCI_COMMAND, pass 1 the disabled ones.
+ * A resource that cannot be claimed is zeroed (end -= start,
+ * start = 0) so pcibios_assign_resources() re-assigns it later.
+ * Pass 0 additionally turns off any expansion ROM decoder that was
+ * left enabled.
+ */
+static void __init
+pcibios_allocate_resources(int pass)
+{
+ struct pci_dev *dev;
+ int idx, disabled;
+ u16 command;
+ struct resource *r, *pr;
+
+ pci_for_each_dev(dev) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ for(idx = 0; idx < 6; idx++) {
+ r = &dev->resource[idx];
+ if (r->parent) /* Already allocated */
+ continue;
+ if (!r->start) /* Address not assigned at all */
+ continue;
+
+ if (r->flags & IORESOURCE_IO)
+ disabled = !(command & PCI_COMMAND_IO);
+ else
+ disabled = !(command & PCI_COMMAND_MEMORY);
+ if (pass == disabled) {
+ PPCDBG(PPCDBG_PHBINIT,
+ "PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
+ r->start, r->end, r->flags, disabled, pass);
+ pr = pci_find_parent_resource(dev, r);
+ if (!pr || request_resource(pr, r) < 0) {
+ PPCDBG(PPCDBG_PHBINIT,
+ "PCI: Cannot allocate resource region %d of device %s, pr = 0x%lx\n", idx, dev->slot_name, pr);
+ if(pr) {
+ /* NOTE(review): this debug statement calls
+ * request_resource() a second time purely to
+ * log its return value -- confirm that side
+ * effect is intended with debugging enabled. */
+ PPCDBG(PPCDBG_PHBINIT,
+ "PCI: Cannot allocate resource 0x%lx\n", request_resource(pr,r));
+ }
+ /* We'll assign a new address later */
+ r->end -= r->start;
+ r->start = 0;
+ }
+ }
+ }
+ if (!pass) {
+ r = &dev->resource[PCI_ROM_RESOURCE];
+ if (r->flags & PCI_ROM_ADDRESS_ENABLE) {
+ /* Turn the ROM off, leave the resource region, but keep it unregistered. */
+ u32 reg;
+ r->flags &= ~PCI_ROM_ADDRESS_ENABLE;
+ /* Fixed mis-decoded "&reg" (text corruption turned it into '®'). */
+ pci_read_config_dword(dev, dev->rom_base_reg, &reg);
+ pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
+ }
+ }
+ }
+}
+
+/* Give fresh addresses to resources left unassigned (start == 0 with a
+ * nonzero size) by the allocation passes, honoring the platform's
+ * enable-device hook. IDE legacy BARs and VGA I/O ports are left
+ * alone; ROM assignment is compiled out.
+ */
+static void __init
+pcibios_assign_resources(void)
+{
+ struct pci_dev *dev;
+ int idx;
+ struct resource *r;
+
+ pci_for_each_dev(dev) {
+ int class = dev->class >> 8;
+
+ /* Don't touch classless devices and host bridges */
+ if (!class || class == PCI_CLASS_BRIDGE_HOST)
+ continue;
+
+ for(idx=0; idx<6; idx++) {
+ r = &dev->resource[idx];
+
+ /*
+ * Don't touch IDE controllers and I/O ports of video cards!
+ */
+ if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
+ (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
+ continue;
+
+ /*
+ * We shall assign a new address to this resource, either because
+ * the BIOS forgot to do so or because we have decided the old
+ * address was unusable for some reason.
+ */
+ if (!r->start && r->end && ppc_md.pcibios_enable_device_hook &&
+ !ppc_md.pcibios_enable_device_hook(dev, 1))
+ pci_assign_resource(dev, idx);
+ }
+
+ if (0) { /* don't assign ROMs */
+ r = &dev->resource[PCI_ROM_RESOURCE];
+ r->end -= r->start;
+ r->start = 0;
+ if (r->end)
+ pci_assign_resource(dev, PCI_ROM_RESOURCE);
+ }
+ }
+}
+
+
+/* Turn on I/O and/or memory decode in PCI_COMMAND according to which
+ * resource types the device uses. Fails with -EINVAL when any BAR is
+ * still unassigned (start == 0 with nonzero size) due to a collision.
+ * Returns 0 on success.
+ */
+int
+pcibios_enable_resources(struct pci_dev *dev)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for(idx=0; idx<6; idx++) {
+ r = &dev->resource[idx];
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name);
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ /* An enabled ROM also needs memory decode. */
+ if (dev->resource[PCI_ROM_RESOURCE].start)
+ cmd |= PCI_COMMAND_MEMORY;
+ if (cmd != old_cmd) {
+ printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+/*
+ * Allocate a pci_controller (PHB) from bootmem, zero it, record its
+ * model string (truncated to 7 chars), assign it the next global PHB
+ * number, and link it onto the hose list. Returns NULL on allocation
+ * failure.
+ */
+struct pci_controller * __init
+pci_alloc_pci_controller(char *model, enum phb_types controller_type)
+{
+ struct pci_controller *hose;
+ PPCDBG(PPCDBG_PHBINIT, "PCI: Allocate pci_controller for %s\n",model);
+ hose = (struct pci_controller *)alloc_bootmem(sizeof(struct pci_controller));
+ if(hose == NULL) {
+ printk(KERN_ERR "PCI: Allocate pci_controller failed.\n");
+ return NULL;
+ }
+ memset(hose, 0, sizeof(struct pci_controller));
+ /* memset above guarantees NUL-termination for the 7-byte copy. */
+ if(strlen(model) < 8) strcpy(hose->what,model);
+ else memcpy(hose->what,model,7);
+ hose->type = controller_type;
+ hose->global_number = global_phb_number;
+ phbtab[global_phb_number++] = hose;
+
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+ return hose;
+}
+
+/*
+ * This fixup is arch independent and probably should go somewhere else.
+ */
+/* Rewrite the vendor ID of mis-identified Trident/AMD pcnet32 Lance
+ * adapters to AMD.
+ * NOTE(review): the class test is part of the while condition, so the
+ * scan stops at the first Trident Lance whose class is NOT Ethernet
+ * instead of skipping it -- confirm that is intended.
+ */
+void __init
+pcibios_generic_fixup(void)
+{
+ struct pci_dev *dev;
+
+ /* Fix miss-identified vendor AMD pcnet32 adapters. */
+ dev = NULL;
+ while ((dev = pci_find_device(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, dev)) != NULL &&
+ dev->class == (PCI_CLASS_NETWORK_ETHERNET << 8))
+ dev->vendor = PCI_VENDOR_ID_AMD;
+}
+
+
+
+/***********************************************************************
+ *
+ *
+ *
+ ***********************************************************************/
+/* Main PCI initialization: run the brand-specific init, scan every
+ * recorded PHB, apply machine-dependent and generic fixups, then
+ * allocate and assign all resources. On pSeries this also fixes up
+ * bus sysdata, builds the TCE tables, and locates the floppy device.
+ */
+void __init
+pcibios_init(void)
+{
+ struct pci_controller *hose;
+ struct pci_bus *bus;
+ int next_busno;
+
+#ifndef CONFIG_PPC_ISERIES
+ pSeries_pcibios_init();
+#else
+ iSeries_pcibios_init();
+#endif
+
+ printk("PCI: Probing PCI hardware\n");
+ PPCDBG(PPCDBG_BUSWALK,"PCI: Probing PCI hardware\n");
+
+
+ /* Scan all of the recorded PCI controllers. */
+ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
+ hose->last_busno = 0xff;
+ bus = pci_scan_bus(hose->first_busno, hose->ops, hose->arch_data);
+ hose->bus = bus;
+ hose->last_busno = bus->subordinate;
+ if (pci_assign_all_busses || next_busno <= hose->last_busno)
+ next_busno = hose->last_busno+1;
+ }
+ pci_bus_count = next_busno;
+
+ /* Call machine dependant fixup */
+ if (ppc_md.pcibios_fixup) {
+ ppc_md.pcibios_fixup();
+ }
+
+ /* Generic fixups */
+ pcibios_generic_fixup();
+
+ /* Allocate and assign resources */
+ pcibios_allocate_bus_resources(&pci_root_buses);
+ pcibios_allocate_resources(0);
+ pcibios_allocate_resources(1);
+ pcibios_assign_resources();
+
+#ifndef CONFIG_PPC_ISERIES
+ pci_fix_bus_sysdata();
+
+ create_tce_tables();
+ PPCDBG(PPCDBG_BUSWALK,"pSeries create_tce_tables()\n");
+#endif
+ ppc64_floppy_dev = find_floppy();
+
+ printk("PCI: Probing PCI hardware done\n");
+ PPCDBG(PPCDBG_BUSWALK,"PCI: Probing PCI hardware done.\n");
+
+}
+
+/* Tell the generic PCI layer whether bus numbers should be reassigned
+ * during the scan (per the pci_assign_all_busses global).
+ */
+int __init
+pcibios_assign_all_busses(void)
+{
+ return pci_assign_all_busses;
+}
+
+/* Resource-assignment hook: no adjustment needed on this platform, so
+ * the proposed start address is returned unchanged.
+ */
+unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
+ unsigned long start, unsigned long size)
+{
+ return start;
+}
+
+/* Per-bus fixup: for a host bridge, install the PHB's I/O and memory
+ * resources as the bus resources; for a subordinate bridge, read its
+ * windows and translate them (to EEH tokens, or by io_base_virt /
+ * pci_mem_offset). Finally invoke the machine-dependent hook.
+ * (Removed the unused local 'io_offset'.)
+ */
+void __init pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_controller *phb = PCI_GET_PHB_PTR(bus);
+ struct resource *res;
+ int i;
+
+#ifndef CONFIG_PPC_ISERIES
+ if (bus->parent == NULL) {
+ /* This is a host bridge - fill in its resources */
+ phb->bus = bus;
+ bus->resource[0] = res = &phb->io_resource;
+ if (!res->flags)
+ BUG(); /* No I/O resource for this PHB? */
+
+ for (i = 0; i < 3; ++i) {
+ res = &phb->mem_resources[i];
+ if (!res->flags) {
+ if (i == 0)
+ BUG(); /* No memory resource for this PHB? */
+ }
+ bus->resource[i+1] = res;
+ }
+ } else {
+ /* This is a subordinate bridge */
+ pci_read_bridge_bases(bus);
+
+ for (i = 0; i < 4; ++i) {
+ if ((res = bus->resource[i]) == NULL)
+ continue;
+ if (!res->flags)
+ continue;
+ if (res == pci_find_parent_resource(bus->self, res)) {
+ /* Transparent resource -- don't try to "fix" it. */
+ continue;
+ }
+#ifdef CONFIG_PPC_EEH
+ if (res->flags & (IORESOURCE_IO|IORESOURCE_MEM)) {
+ res->start = eeh_token(phb->global_number, bus->number, 0, 0);
+ res->end = eeh_token(phb->global_number, bus->number, 0xff, 0xffffffff);
+ }
+#else
+ if (res->flags & IORESOURCE_IO) {
+ res->start += (unsigned long)phb->io_base_virt;
+ res->end += (unsigned long)phb->io_base_virt;
+ } else if (phb->pci_mem_offset
+ && (res->flags & IORESOURCE_MEM)) {
+ if (res->start < phb->pci_mem_offset) {
+ res->start += phb->pci_mem_offset;
+ res->end += phb->pci_mem_offset;
+ }
+ }
+#endif
+ }
+ }
+#endif
+ if ( ppc_md.pcibios_fixup_bus )
+ ppc_md.pcibios_fixup_bus(bus);
+}
+
+/* Command-line "pci=" option hook: nothing handled here, so the string
+ * is returned for the generic code to process.
+ */
+char __init *pcibios_setup(char *str)
+{
+ return str;
+}
+
+/* Enable a device: consult the platform hook, then set I/O/memory
+ * decode bits in PCI_COMMAND as pcibios_enable_resources() does.
+ * Returns -EINVAL when the hook vetoes the device or a BAR is still
+ * unassigned; 0 on success.
+ */
+int pcibios_enable_device(struct pci_dev *dev)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ PPCDBG(PPCDBG_BUSWALK,"PCI: "__FUNCTION__" for device %s \n",dev->slot_name);
+ if (ppc_md.pcibios_enable_device_hook)
+ if (ppc_md.pcibios_enable_device_hook(dev, 0))
+ return -EINVAL;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx=0; idx<6; idx++) {
+ r = &dev->resource[idx];
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name);
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (cmd != old_cmd) {
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ dev->slot_name, old_cmd, cmd);
+ PPCDBG(PPCDBG_BUSWALK,"PCI: Enabling device %s \n",dev->slot_name);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+/* Map a PCI bus number to the pci_controller (PHB) whose bus range
+ * contains it, or NULL when no registered hose covers the number.
+ */
+struct pci_controller*
+pci_bus_to_hose(int bus)
+{
+ struct pci_controller* hose;
+
+ for (hose = hose_head; hose != NULL; hose = hose->next) {
+ if (bus < hose->first_busno || bus > hose->last_busno)
+ continue;
+ return hose;
+ }
+ return NULL;
+}
+
+/* Return the virtual I/O base of the hose owning 'bus', or NULL when
+ * the bus belongs to no known host bridge.
+ */
+void*
+pci_bus_io_base(unsigned int bus)
+{
+ struct pci_controller *hose = pci_bus_to_hose(bus);
+
+ return hose ? hose->io_base_virt : NULL;
+}
+
+/* Return the physical I/O base of the hose owning 'bus', or 0 when
+ * the bus belongs to no known host bridge.
+ */
+unsigned long
+pci_bus_io_base_phys(unsigned int bus)
+{
+ struct pci_controller *hose = pci_bus_to_hose(bus);
+
+ return hose ? hose->io_base_phys : 0;
+}
+
+/* Return the PCI memory offset of the hose owning 'bus', or 0 when
+ * the bus belongs to no known host bridge.
+ */
+unsigned long
+pci_bus_mem_base_phys(unsigned int bus)
+{
+ struct pci_controller *hose = pci_bus_to_hose(bus);
+
+ return hose ? hose->pci_mem_offset : 0;
+}
+
+/*
+ * Return the index of the PCI controller for device pdev.
+ */
+int pci_controller_num(struct pci_dev *dev)
+{
+ struct pci_controller *hose = PCI_GET_PHB_PTR(dev);
+
+ return hose->global_number;
+}
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s,
+ * modelled on the sparc64 implementation by Dave Miller.
+ * -- paulus.
+ */
+
+/*
+ * Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address for his device which he wishes
+ * to mmap. They read the 32-bit value from the config space base register,
+ * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
+ * offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
+/* Validate a user-requested mmap offset against the device's resources
+ * and rewrite vma->vm_pgoff to the corresponding physical page offset.
+ * Returns 0 on success, -EINVAL when the offset matches no resource of
+ * the requested type (or the device has no hose).
+ */
+static __inline__ int
+__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state)
+{
+ struct pci_controller *hose = PCI_GET_PHB_PTR(dev);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long io_offset = 0;
+ int i, res_bit;
+
+ if (hose == 0)
+ return -EINVAL; /* should never happen */
+
+ /* If memory, add on the PCI bridge address offset */
+ if (mmap_state == pci_mmap_mem) {
+ offset += hose->pci_mem_offset;
+ res_bit = IORESOURCE_MEM;
+ } else {
+ io_offset = (unsigned long)hose->io_base_virt;
+ offset += io_offset;
+ res_bit = IORESOURCE_IO;
+ }
+
+ /*
+ * Check that the offset requested corresponds to one of the
+ * resources of the device.
+ */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ struct resource *rp = &dev->resource[i];
+ int flags = rp->flags;
+
+ /* treat ROM as memory (should be already) */
+ if (i == PCI_ROM_RESOURCE)
+ flags |= IORESOURCE_MEM;
+
+ /* Active and same type? */
+ if ((flags & res_bit) == 0)
+ continue;
+
+ /* In the range of this resource? */
+ if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
+ continue;
+
+ /* found it! construct the final physical address */
+ if (mmap_state == pci_mmap_io)
+ offset += hose->io_base_phys - io_offset;
+
+ vma->vm_pgoff = offset >> PAGE_SHIFT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
+ * mapping.
+ */
+/* Mark the VMA as a shared, locked I/O mapping for this architecture. */
+static __inline__ void
+__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state)
+{
+ vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
+}
+
+/*
+ * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.
+ */
+/* Make the VMA's page protection uncached, and also guarded unless the
+ * caller asked for write-combining.
+ */
+static __inline__ void
+__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ long prot = pgprot_val(vma->vm_page_prot);
+
+ /* XXX would be nice to have a way to ask for write-through */
+ prot |= _PAGE_NO_CACHE;
+ if (!write_combine)
+ prot |= _PAGE_GUARDED;
+ vma->vm_page_prot = __pgprot(prot);
+}
+
+/*
+ * Perform the actual remap of the pages for a PCI device mapping, as
+ * appropriate for this architecture. The region in the process to map
+ * is described by vm_start and vm_end members of VMA, the base physical
+ * address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+/* Perform the mmap of a PCI device region: translate the requested
+ * offset, set the VMA flags and page protection, then remap the pages.
+ * Returns 0 on success or a negative error code.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state,
+ int write_combine)
+{
+ int ret;
+
+ ret = __pci_mmap_make_offset(dev, vma, mmap_state);
+ if (ret < 0)
+ return ret;
+
+ __pci_mmap_set_flags(dev, vma, mmap_state);
+ __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+
+ ret = remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+
+ return ret;
+}
+
+/* Provide information on locations of various I/O regions in physical
+ * memory. Do this on a per-card basis so that we choose the right
+ * root bridge.
+ * Note that the returned IO or memory base is a physical address
+ */
+
+/* System call: report the physical base of the requested I/O region
+ * (bridge number, memory offset, I/O base, or ISA bases) for the hose
+ * owning 'bus'. Returns -ENODEV for an unknown bus and -EOPNOTSUPP
+ * for an unknown 'which'.
+ */
+long
+sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
+{
+ struct pci_controller* hose = pci_bus_to_hose(bus);
+ long result = -EOPNOTSUPP;
+
+ if (!hose)
+ return -ENODEV;
+
+ switch (which) {
+ case IOBASE_BRIDGE_NUMBER:
+ return (long)hose->first_busno;
+ case IOBASE_MEMORY:
+ return (long)hose->pci_mem_offset;
+ case IOBASE_IO:
+ return (long)hose->io_base_phys;
+ case IOBASE_ISA_IO:
+ return (long)isa_io_base;
+ case IOBASE_ISA_MEM:
+ return (long)isa_mem_base;
+ }
+
+ return result;
+}
+/************************************************************************/
+/* Formats the device information and location for service. */
+/* - Pass in pci_dev* pointer to the device. */
+/* - Pass in buffer to place the data. Danger here is the buffer must */
+/* be as big as the client says it is. Should be at least 128 bytes.*/
+/* Returns the length of the string data put in the buffer. */
+/* The brand specific method device_Location is called. */
+/* Format: */
+/* PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet */
+/* PCI: Bus 0, Device 26, Vendor 0x12AE Location U0.3-P1-I8 Ethernet */
+/* For pSeries, see the Product Topology in the RS/6000 Architecture. */
+/* For iSeries, see the Service Manuals. */
+/************************************************************************/
+/* Format the device location (via the brand-specific device_Location)
+ * plus its class name into BufPtr; returns the number of characters
+ * written, or 0 if the device has no OF node or the buffer is smaller
+ * than the required 128 bytes.
+ */
+int format_device_location(struct pci_dev* PciDev,char* BufPtr, int BufferSize)
+{
+ struct device_node* DevNode = (struct device_node*)PciDev->sysdata;
+ int LineLen = 0;
+ if (DevNode != NULL && BufferSize >= 128) {
+ LineLen += device_Location(PciDev,BufPtr+LineLen);
+ LineLen += sprintf(BufPtr+LineLen," %12s",pci_class_name(PciDev->class >> 8) );
+ }
+ return LineLen;
+}
+/************************************************************************
+ * Saves the config registers for a device. *
+ ************************************************************************
+ * Note: This does byte reads so the data may appear byte swapped.   *
+ * The data returned in the pci_config_reg_save_area structure can   *
+ * be used to restore the registers later. If the save failed, the   *
+ * data will not be restored. Yes I know, you are most likely toast. *
+ ************************************************************************/
+/* Save the first REG_SAVE_SIZE bytes of the device's config space into
+ * SaveArea, one byte at a time, stopping at the first read error.
+ * Flags is set to 0x01 on success, 0x80 on failure; returns the last
+ * config-read return code (0 on success).
+ * NOTE(review): the failure printk names "pci_restore_save_regs" --
+ * looks like it should say pci_save_config_regs (string left as-is).
+ */
+int pci_save_config_regs(struct pci_dev* PciDev,struct pci_config_reg_save_area* SaveArea)
+{
+ memset(SaveArea,0x00,sizeof(struct pci_config_reg_save_area) );
+ SaveArea->PciDev = PciDev;
+ SaveArea->RCode = 0;
+ SaveArea->Register = 0;
+ /******************************************************************
+ * Save All the Regs, NOTE: restore skips the first 16 bytes. *
+ ******************************************************************/
+ while (SaveArea->Register < REG_SAVE_SIZE && SaveArea->RCode == 0) {
+ SaveArea->RCode = pci_read_config_byte(PciDev, SaveArea->Register, &SaveArea->Regs[SaveArea->Register]);
+ ++SaveArea->Register;
+ }
+ if (SaveArea->RCode != 0) { /* Ouch */
+ SaveArea->Flags = 0x80;
+ printk("PCI: pci_restore_save_regs failed! %p\n 0x%04X",PciDev,SaveArea->RCode);
+ }
+ else {
+ SaveArea->Flags = 0x01;
+ }
+ return SaveArea->RCode;
+}
+
+/************************************************************************
+ * Restores the registers saved via the save function. See the save *
+ * function for details. *
+ ************************************************************************/
+/************************************************************************
+ * Restores the registers saved via pci_save_config_regs().		*
+ * Refuses to run if the save area belongs to a different device, the	*
+ * save failed (Flags == 0x80), or a prior error is recorded.		*
+ * Returns 0 on success, -1 on a bad save area, or the failing		*
+ * pci_write_config_byte() rc.						*
+ ************************************************************************/
+int pci_restore_config_regs(struct pci_dev* PciDev,struct pci_config_reg_save_area* SaveArea)
+{
+	if (SaveArea->PciDev != PciDev || SaveArea->Flags == 0x80 || SaveArea->RCode != 0) {
+		printk("PCI: pci_restore_config_regs failed! %p\n",PciDev);
+		return -1;
+	}
+	/******************************************************************
+	 * Don't touch the Cmd or BIST regs, user must restore those.     *
+	 * Restore PCI_VENDOR_ID & PCI_DEVICE_ID                          *
+	 * Restore PCI_CACHE_LINE_SIZE & PCI_LATENCY_TIMER                *
+	 * Restore Saved Regs from 0x10 to 0x3F                           *
+	 ******************************************************************/
+	SaveArea->Register = 0;
+	while(SaveArea->Register < REG_SAVE_SIZE && SaveArea->RCode == 0) {
+		SaveArea->RCode = pci_write_config_byte(PciDev,SaveArea->Register,SaveArea->Regs[SaveArea->Register]);
+		++SaveArea->Register;
+		/* Skip 0x04-0x0B (command/status/rev/class) and
+		 * 0x0E-0x0F (header type/BIST) per the policy above. */
+		if (     SaveArea->Register == PCI_COMMAND)     SaveArea->Register = PCI_CACHE_LINE_SIZE;
+		else if (SaveArea->Register == PCI_HEADER_TYPE) SaveArea->Register = PCI_BASE_ADDRESS_0;
+	}
+	if (SaveArea->RCode != 0) {
+		/* BUG FIX: newline was previously embedded mid-string,
+		 * splitting the message before the RCode value. */
+		printk("PCI: pci_restore_config_regs failed! %p 0x%04X\n",PciDev,SaveArea->RCode);
+	}
+	return SaveArea->RCode;
+}
+
+/************************************************************************/
+/* Interface to toggle the reset line */
+/* Time is in .1 seconds, need for seconds. */
+/************************************************************************/
+/************************************************************************/
+/* Interface to toggle the reset line for a device.                     */
+/* AssertTime / DelayTime are in tenths of a second; 0 selects the      */
+/* defaults (assert 0.5s, post-deassert wait 3s).                       */
+/* Returns 0 on success or the failing pci_set_reset() return code.     */
+/************************************************************************/
+int pci_reset_device(struct pci_dev* PciDev, int AssertTime, int DelayTime)
+{
+	unsigned long AssertDelay, WaitDelay;
+	int RtnCode;
+	/********************************************************************
+	 * Set defaults, Assert is .5 second, Wait is 3 seconds.
+	 ********************************************************************/
+	if (AssertTime == 0) AssertDelay = ( 5 * HZ)/10;
+	else                 AssertDelay = (AssertTime*HZ)/10;
+	/* BUG FIX: the original tested the then-uninitialized WaitDelay
+	 * here instead of the DelayTime parameter. */
+	if (DelayTime == 0)  WaitDelay = (30 * HZ)/10;
+	else                 WaitDelay = (DelayTime* HZ)/10;
+
+	/********************************************************************
+	 * Assert reset, wait, de-assert reset, wait for IOA to reset.
+	 * - Don't waste the CPU time on jiffies.
+	 ********************************************************************/
+	RtnCode = pci_set_reset(PciDev,1);
+	if (RtnCode == 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(AssertDelay);       /* Sleep for the time */
+		RtnCode = pci_set_reset(PciDev,0);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(WaitDelay);
+	}
+	if (RtnCode != 0) {
+		printk("PCI: Bus%3d, Device%3d, Reset Failed:0x%04X\n",PciDev->bus->number,PCI_SLOT(PciDev->devfn),RtnCode );
+	}
+	return RtnCode;
+}
+
+/*****************************************************
+ * Dump Resource information
+ *****************************************************/
+/*****************************************************
+ * Dump one resource to the debug console: a type tag
+ * (IO/MEM/IRQ or raw flags) followed by start, end,
+ * and length.  Empty or degenerate ranges are skipped.
+ *****************************************************/
+void dumpResources(struct resource* Resource)
+{
+	int Flags;
+
+	if (Resource == NULL)
+		return;
+	/* Skip unset and zero-length ranges. */
+	if (Resource->start == 0 && Resource->end == 0)
+		return;
+	if (Resource->start == Resource->end)
+		return;
+
+	Flags = 0x00000F00 & Resource->flags;
+	if (Flags == IORESOURCE_IO)
+		udbg_printf("IO.:");
+	else if (Flags == IORESOURCE_MEM)
+		udbg_printf("MEM:");
+	else if (Flags == IORESOURCE_IRQ)
+		udbg_printf("IRQ:");
+	else
+		udbg_printf("0x%02X:",Resource->flags);
+
+	udbg_printf("0x%016LX / 0x%016LX (0x%08X)\n",
+		    Resource->start, Resource->end, Resource->end - Resource->start);
+}
+
+/*****************************************************
+ * Return a size for the resource range, or 0 for an
+ * empty/degenerate range.
+ * NOTE(review): this returns (end-1)-start, which is
+ * two less than the conventional inclusive size
+ * (end - start + 1) -- confirm whether callers rely
+ * on this before "fixing" it.
+ *****************************************************/
+int resourceSize(struct resource* Resource)
+{
+	if(Resource->start == 0 && Resource->end == 0) return 0;
+	else if(Resource->start == Resource->end ) return 0;
+	else return (Resource->end-1)-Resource->start;
+}
+
+
+/*****************************************************
+ * Dump PHB information for Debug
+ *****************************************************/
+/*****************************************************
+ * Dump the fields of a pci_controller (PHB) to the
+ * debug console, including its resources.  Safe to
+ * call with phb == NULL (prints only the pointer).
+ *****************************************************/
+void dumpPci_Controller(struct pci_controller* phb)
+{
+	udbg_printf("\tpci_controller= 0x%016LX\n", phb);
+	if (phb != NULL) {
+		udbg_printf("\twhat & type   = %s 0x%02X\n ",phb->what,phb->type);
+		udbg_printf("\tbus           = ");
+		if (phb->bus != NULL) udbg_printf("0x%02X\n", phb->bus->number);
+		else                  udbg_printf("<NULL>\n");
+		udbg_printf("\tarch_data     = 0x%016LX\n", phb->arch_data);
+		udbg_printf("\tfirst_busno   = 0x%02X\n", phb->first_busno);
+		udbg_printf("\tlast_busno    = 0x%02X\n", phb->last_busno);
+		udbg_printf("\tio_base_virt* = 0x%016LX\n", phb->io_base_virt);
+		udbg_printf("\tio_base_phys  = 0x%016LX\n", phb->io_base_phys);
+		udbg_printf("\tpci_mem_offset= 0x%016LX\n", phb->pci_mem_offset);
+		udbg_printf("\tpci_io_offset = 0x%016LX\n", phb->pci_io_offset);
+
+		udbg_printf("\tcfg_addr      = 0x%016LX\n", phb->cfg_addr);
+		udbg_printf("\tcfg_data      = 0x%016LX\n", phb->cfg_data);
+		udbg_printf("\tphb_regs      = 0x%016LX\n", phb->phb_regs);
+		udbg_printf("\tchip_regs     = 0x%016LX\n", phb->chip_regs);
+
+		/* I/O resource, then up to three memory resources. */
+		udbg_printf("\tResources\n");
+		dumpResources(&phb->io_resource);
+		if (phb->mem_resource_count > 0) dumpResources(&phb->mem_resources[0]);
+		if (phb->mem_resource_count > 1) dumpResources(&phb->mem_resources[1]);
+		if (phb->mem_resource_count > 2) dumpResources(&phb->mem_resources[2]);
+
+		udbg_printf("\tglobal_num    = 0x%02X\n", phb->global_number);
+		udbg_printf("\tlocal_num     = 0x%02X\n", phb->local_number);
+	}
+}
+
+/*****************************************************
+ * Dump PHB information for Debug
+ *****************************************************/
+/*****************************************************
+ * Dump the fields of a pci_bus to the debug console.
+ * Safe to call with Pci_Bus == NULL (prints only the
+ * pointer).  Stops listing resources at the first
+ * unset (0/0) entry.
+ *****************************************************/
+void dumpPci_Bus(struct pci_bus* Pci_Bus)
+{
+	int i;
+	udbg_printf("\tpci_bus       = 0x%016LX \n",Pci_Bus);
+	if (Pci_Bus != NULL) {
+
+		udbg_printf("\tnumber        = 0x%02X \n",Pci_Bus->number);
+		udbg_printf("\tprimary       = 0x%02X \n",Pci_Bus->primary);
+		udbg_printf("\tsecondary     = 0x%02X \n",Pci_Bus->secondary);
+		udbg_printf("\tsubordinate   = 0x%02X \n",Pci_Bus->subordinate);
+
+		for (i=0;i<4;++i) {
+			if(Pci_Bus->resource[i] == NULL) continue;
+			/* An all-zero entry terminates the list. */
+			if(Pci_Bus->resource[i]->start == 0 && Pci_Bus->resource[i]->end == 0) break;
+			udbg_printf("\tResources[%d]",i);
+			dumpResources(Pci_Bus->resource[i]);
+		}
+	}
+}
+
+/*****************************************************
+ * Dump Device information for Debug
+ *****************************************************/
+/*****************************************************
+ * Dump the fields and resources of a pci_dev to the
+ * debug console.  Safe to call with Pci_Dev == NULL.
+ *****************************************************/
+void dumpPci_Dev(struct pci_dev* Pci_Dev)
+{
+	int i;
+	udbg_printf("\tpci_dev*      = 0x%p\n",Pci_Dev);
+	if ( Pci_Dev == NULL )  return;
+	udbg_printf("\tname          = %s \n",Pci_Dev->name);
+	udbg_printf("\tbus*          = 0x%p\n",Pci_Dev->bus);
+	udbg_printf("\tsysdata*      = 0x%p\n",Pci_Dev->sysdata);
+	udbg_printf("\tDevice        = 0x%4X%02X:%02X.%02X 0x%04X:%04X\n",
+	            PCI_GET_PHB_NUMBER(Pci_Dev),
+	            PCI_GET_BUS_NUMBER(Pci_Dev),
+	            PCI_SLOT(Pci_Dev->devfn),
+	            PCI_FUNC(Pci_Dev->devfn),
+	            Pci_Dev->vendor,
+	            Pci_Dev->device);
+	udbg_printf("\tHdr/Irq       = 0x%02X/0x%02X \n",Pci_Dev->hdr_type,Pci_Dev->irq);
+	for (i=0;i<DEVICE_COUNT_RESOURCE;++i) {
+		/* Skip unset resources. */
+		if (Pci_Dev->resource[i].start == 0 && Pci_Dev->resource[i].end == 0) continue;
+		udbg_printf("\tResources[%d] ",i);
+		dumpResources(&Pci_Dev->resource[i]);
+	}
+	/* BUG FIX: removed a stray dumpResources(&Pci_Dev->resource[i])
+	 * after the loop -- at that point i == DEVICE_COUNT_RESOURCE, so
+	 * it read past the end of the resource[] array. */
+}
--- /dev/null
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __PPC_KERNEL_PCI_H__
+#define __PPC_KERNEL_PCI_H__
+
+#include <linux/pci.h>
+#include <asm/pci-bridge.h>
+
+extern unsigned long isa_io_base;
+extern unsigned long isa_mem_base;
+extern unsigned long pci_dram_offset;
+
+/*******************************************************************
+ * Platform independant variables referenced.
+ *******************************************************************
+ * Set pci_assign_all_busses to 1 if you want the kernel to re-assign
+ * all PCI bus numbers.
+ *******************************************************************/
+extern int pci_assign_all_busses;
+
+extern struct pci_controller* pci_alloc_pci_controller(char *model, enum phb_types controller_type);
+extern struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node);
+
+extern struct pci_controller* hose_head;
+extern struct pci_controller** hose_tail;
+/* PHB's are also in a table. */
+#define PCI_MAX_PHB 64
+extern int global_phb_number;
+extern struct pci_controller *phbtab[];
+
+/*******************************************************************
+ * Platform functions that are brand specific implementation.
+ *******************************************************************/
+extern unsigned long find_and_init_phbs(void);
+
+extern void fixup_resources(struct pci_dev *dev);
+extern void ppc64_pcibios_init(void);
+
+extern int pci_set_reset(struct pci_dev*,int);
+extern int device_Location(struct pci_dev*,char*);
+extern int format_device_location(struct pci_dev*,char*, int );
+
+extern struct pci_dev *ppc64_floppy_dev;
+
+/*******************************************************************
+ * PCI device_node operations
+ *******************************************************************/
+struct device_node;
+typedef void *(*traverse_func)(struct device_node *me, void *data);
+void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data);
+void *traverse_all_pci_devices(traverse_func pre);
+
+void pci_devs_phb_init(void);
+void pci_fix_bus_sysdata(void);
+struct device_node *fetch_dev_dn(struct pci_dev *dev);
+
+void iSeries_pcibios_init_early(void);
+void pSeries_pcibios_init_early(void);
+void pSeries_pcibios_init(void);
+
+/* Get a device_node from a pci_dev. This code must be fast except in the case
+ * where the sysdata is incorrect and needs to be fixed up (hopefully just once)
+ */
+static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
+{
+	struct device_node *dn = (struct device_node *)(dev->sysdata);
+	/* Fast path: trust sysdata when its devfn/busno match the device. */
+	if (dn->devfn == dev->devfn && dn->busno == dev->bus->number)
+		return dn;	/* fast path.  sysdata is good */
+	else
+		/* Slow path: sysdata is stale; look the node up and fix it. */
+		return fetch_dev_dn(dev);
+}
+/* Use this macro after the PCI bus walk for max performance when it
+ * is known that sysdata is correct.
+ */
+#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
+
+
+/*******************************************************************
+ * Platform configuration flags.. (Live in pci.c)
+ *******************************************************************/
+extern int Pci_Large_Bus_System; /* System has > 256 buses */
+extern int Pci_Manage_Phb_Space; /* Manage Phb Space for IOAs*/
+
+/*******************************************************************
+ * Helper macros for extracting data from pci structures.
+ * PCI_GET_PHB_PTR(struct pci_dev*) returns the Phb pointer.
+ * PCI_GET_PHB_NUMBER(struct pci_dev*) returns the Phb number.
+ * PCI_GET_BUS_NUMBER(struct pci_dev*) returns the bus number.
+ *******************************************************************/
+#define PCI_GET_PHB_PTR(dev) (((struct device_node *)(dev)->sysdata)->phb)
+#define PCI_GET_PHB_NUMBER(dev) (((dev)->bus->number&0x00FFFF00)>>8)
+#define PCI_GET_BUS_NUMBER(dev) ((dev)->bus->number&0x0000FF)
+
+/*******************************************************************
+ * Debugging Routines.
+ *******************************************************************/
+extern void dumpResources(struct resource* Resource);
+extern void dumpPci_Controller(struct pci_controller* phb);
+extern void dumpPci_Bus(struct pci_bus* Pci_Bus);
+extern void dumpPci_Dev(struct pci_dev* Pci_Dev);
+
+extern void dump_Phb_tree(void);
+extern void dump_Bus_tree(void);
+extern void dump_Dev_tree(void);
+
+#endif /* __PPC_KERNEL_PCI_H__ */
--- /dev/null
+/*
+ * pci_dma.c
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ *
+ * Dynamic DMA mapping support.
+ *
+ * Manages the TCE space assigned to this partition.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/ppcdebug.h>
+
+#include <asm/iSeries/HvCallXm.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/pci_dma.h>
+#include <asm/pci-bridge.h>
+#include <asm/iSeries/iSeries_pci.h>
+
+#include <asm/machdep.h>
+
+#include "pci.h"
+
+// #define DEBUG_TCE 1
+
+/* Initialize so this guy does not end up in the BSS section.
+ * Only used to pass OF initialization data set in prom.c into the main
+ * kernel code -- data ultimately copied into tceTables[].
+ */
+extern struct _of_tce_table of_tce_table[];
+
+extern struct pci_controller* hose_head;
+extern struct pci_controller** hose_tail;
+
+struct TceTable virtBusVethTceTable; /* Tce table for virtual ethernet */
+struct TceTable virtBusVioTceTable; /* Tce table for virtual I/O */
+
+struct device_node iSeries_veth_dev_node = { tce_table: &virtBusVethTceTable };
+struct device_node iSeries_vio_dev_node = { tce_table: &virtBusVioTceTable };
+
+struct pci_dev iSeries_veth_dev_st = { sysdata: &iSeries_veth_dev_node };
+struct pci_dev iSeries_vio_dev_st = { sysdata: &iSeries_vio_dev_node };
+
+struct pci_dev * iSeries_veth_dev = &iSeries_veth_dev_st;
+struct pci_dev * iSeries_vio_dev = &iSeries_vio_dev_st;
+
+struct TceTable * tceTables[256]; /* Tce tables for 256 busses
+ * Bus 255 is the virtual bus
+ * zero indicates no bus defined
+ */
+/* allocates a contiguous range of tces (power-of-2 size) */
+static inline long alloc_tce_range(struct TceTable *,
+ unsigned order );
+
+/* allocates a contiguous range of tces (power-of-2 size)
+ * assumes lock already held
+ */
+static long alloc_tce_range_nolock(struct TceTable *,
+ unsigned order );
+
+/* frees a contiguous range of tces (power-of-2 size) */
+static inline void free_tce_range(struct TceTable *,
+ long tcenum,
+ unsigned order );
+
+/* frees a contiguous rnage of tces (power-of-2 size)
+ * assumes lock already held
+ */
+void free_tce_range_nolock(struct TceTable *,
+ long tcenum,
+ unsigned order );
+
+/* allocates a range of tces and sets them to the pages */
+static inline dma_addr_t get_tces( struct TceTable *,
+ unsigned order,
+ void *page,
+ unsigned numPages,
+ int direction );
+
+static long test_tce_range( struct TceTable *,
+ long tcenum,
+ unsigned order );
+
+static unsigned fill_scatterlist_sg(struct scatterlist *sg, int nents,
+ dma_addr_t dma_addr,
+ unsigned long numTces );
+
+static unsigned long num_tces_sg( struct scatterlist *sg,
+ int nents );
+
+static dma_addr_t create_tces_sg( struct TceTable *tbl,
+ struct scatterlist *sg,
+ int nents,
+ unsigned numTces,
+ int direction );
+
+static void getTceTableParmsPSeries( struct pci_controller *phb,
+ struct device_node *dn,
+ struct TceTable *tce_table_parms );
+
+static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
+ struct device_node *dn,
+ struct TceTable *newTceTable );
+
+void create_pci_bus_tce_table( unsigned long token );
+
+/* Return the iSeries bus number for a device.
+ * Currently a stub: always returns 0 and ignores dv. */
+u8 iSeries_Get_Bus( struct pci_dev * dv )
+{
+	return 0;
+}
+
+/* Look up the TCE table for a device.  On iSeries the table is indexed
+ * by bus number (bus 255 is the virtual bus, reached via a fake pci_dev
+ * and device_node); on other platforms it hangs off the device_node. */
+static inline struct TceTable *get_tce_table(struct pci_dev *dev) {
+
+	if ( ( _machine == _MACH_iSeries ) && ( dev->bus ) )
+		return tceTables[dev->bus->number];
+	/* On the iSeries, the virtual bus will take this path.  There is a */
+	/* fake pci_dev and dev_node built and used.                        */
+	return PCI_GET_DN(dev)->tce_table;
+}
+
+/* Count leading zero bits of a 64-bit value using the PowerPC "cntlzd"
+ * instruction (the ISA defines the result as 64 when x == 0). */
+static unsigned long __inline__ count_leading_zeros64( unsigned long x )
+{
+	unsigned long lz;
+	asm("cntlzd %0,%1" : "=r"(lz) : "r"(x));
+	return lz;
+}
+
+/* Install one TCE mapping uaddr at index tcenum via the iSeries
+ * hypervisor (HvCallXm_setTce).  Permission bits differ for the
+ * virtual bus (TCE_VB) versus a real PCI bus; "direction" is a
+ * PCI_DMA_* value and controls write access. */
+static void tce_build_iSeries(struct TceTable *tbl, long tcenum,
+			unsigned long uaddr, int direction )
+{
+	u64 setTceRc;
+	union Tce tce;
+
+	PPCDBG(PPCDBG_TCE, "build_tce: uaddr = 0x%lx\n", uaddr);
+	PPCDBG(PPCDBG_TCE, "\ttcenum = 0x%lx, tbl = 0x%lx, index=%lx\n",
+	       tcenum, tbl, tbl->index);
+
+	tce.wholeTce = 0;
+	/* Real page number of the page being mapped. */
+	tce.tceBits.rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
+
+	/* If for virtual bus */
+	if ( tbl->tceType == TCE_VB ) {
+		tce.tceBits.valid = 1;
+		tce.tceBits.allIo = 1;
+		if ( direction != PCI_DMA_TODEVICE )
+			tce.tceBits.readWrite = 1;
+	} else {
+		/* If for PCI bus */
+		tce.tceBits.readWrite = 1; // Read allowed
+		if ( direction != PCI_DMA_TODEVICE )
+			tce.tceBits.pciWrite = 1;
+	}
+
+	setTceRc = HvCallXm_setTce((u64)tbl->index,
+				   (u64)tcenum,
+				   tce.wholeTce );
+
+	if(setTceRc) {
+		printk("PCI: tce_build failed 0x%lx tcenum: 0x%lx\n", setTceRc, (u64)tcenum);
+		//PPCDBG(PPCDBG_TCE, "setTce failed. rc=%ld\n", setTceRc);
+		//PPCDBG(PPCDBG_TCE, "\tindex   = 0x%lx\n", (u64)tbl->index);
+		//PPCDBG(PPCDBG_TCE, "\ttce num = 0x%lx\n", (u64)tcenum);
+		//PPCDBG(PPCDBG_TCE, "\ttce val = 0x%lx\n", tce.wholeTce );
+	}
+}
+
+/* Install one TCE mapping uaddr at index tcenum by writing directly
+ * into the in-memory TCE table (no hypervisor on pSeries SMP).
+ * "direction" is a PCI_DMA_* value and controls PCI write access. */
+static void tce_build_pSeries(struct TceTable *tbl, long tcenum,
+			unsigned long uaddr, int direction )
+{
+	union Tce tce;
+	union Tce *tce_addr;
+
+	PPCDBG(PPCDBG_TCE, "build_tce: uaddr = 0x%lx\n", uaddr);
+	PPCDBG(PPCDBG_TCE, "\ttcenum = 0x%lx, tbl = 0x%lx, index=%lx\n",
+	       tcenum, tbl, tbl->index);
+
+	tce.wholeTce = 0;
+	/* Real page number of the page being mapped. */
+	tce.tceBits.rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
+
+	tce.tceBits.readWrite = 1; // Read allowed
+	if ( direction != PCI_DMA_TODEVICE ) tce.tceBits.pciWrite = 1;
+
+	tce_addr = ((union Tce *)tbl->base) + tcenum;
+	*tce_addr = (union Tce)tce.wholeTce;
+
+	/* Make sure the update is visible to hardware. */
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/*
+ * Build a TceTable structure. This contains a multi-level bit map which
+ * is used to manage allocation of the tce space.
+ */
+/*
+ * Build a TceTable structure.  This contains a multi-level bit map which
+ * is used to manage allocation of the tce space.
+ *
+ * Level 0 has one bit per TCE; each higher level halves the bit count
+ * (one bit per buddy pair).  All levels share one contiguous allocation.
+ * A set bit means "free".  Returns tbl on success, NULL if the bitmap
+ * pages could not be allocated.
+ */
+static struct TceTable *build_tce_table( struct TceTable * tbl )
+{
+	unsigned long bits, bytes, totalBytes;
+	unsigned long numBits[NUM_TCE_LEVELS], numBytes[NUM_TCE_LEVELS];
+	unsigned i, k, m;
+	unsigned char * pos, * p, b;
+
+	PPCDBG(PPCDBG_TCEINIT, "build_tce_table: tbl = 0x%lx\n", tbl);
+	spin_lock_init( &(tbl->lock) );
+
+	tbl->mlbm.maxLevel = 0;
+
+	/* Compute number of bits and bytes for each level of the
+	 * multi-level bit map
+	 */
+	totalBytes = 0;
+	bits = tbl->size * (PAGE_SIZE / sizeof( union Tce ));
+
+	for ( i=0; i<NUM_TCE_LEVELS; ++i ) {
+		/* Round bytes up to a whole number of 64-bit words. */
+		bytes = ((bits+63)/64) * 8;
+		PPCDBG(PPCDBG_TCEINIT, "build_tce_table: level %d bits=%ld, bytes=%ld\n", i, bits, bytes );
+		numBits[i] = bits;
+		numBytes[i] = bytes;
+		bits /= 2;
+		totalBytes += bytes;
+	}
+	PPCDBG(PPCDBG_TCEINIT, "build_tce_table: totalBytes=%ld\n", totalBytes );
+
+	pos = (char *)__get_free_pages( GFP_ATOMIC, get_order( totalBytes ));
+	if ( !pos )
+		return NULL;
+
+	memset( pos, 0, totalBytes );
+
+	/* For each level, fill in the pointer to the bit map,
+	 * and turn on the last bit in the bit map (if the
+	 * number of bits in the map is odd).  The highest
+	 * level will get all of its bits turned on.
+	 */
+
+	for (i=0; i<NUM_TCE_LEVELS; ++i) {
+		if ( numBytes[i] ) {
+			tbl->mlbm.level[i].map = pos;
+			tbl->mlbm.maxLevel = i;
+
+			if ( numBits[i] & 1 ) {
+				/* Odd bit count: the last block has no
+				 * buddy, so mark it free at this level. */
+				p = pos + numBytes[i] - 1;
+				m = (( numBits[i] % 8) - 1) & 7;
+				*p = 0x80 >> m;
+				PPCDBG(PPCDBG_TCEINIT, "build_tce_table: level %d last bit %x\n", i, 0x80>>m );
+			}
+		}
+		else
+			tbl->mlbm.level[i].map = 0;
+		pos += numBytes[i];
+		tbl->mlbm.level[i].numBits  = numBits[i];
+		tbl->mlbm.level[i].numBytes = numBytes[i];
+	}
+
+	/* For the highest level, turn on all the bits */
+
+	i = tbl->mlbm.maxLevel;
+	p = tbl->mlbm.level[i].map;
+	m = numBits[i];
+	PPCDBG(PPCDBG_TCEINIT, "build_tce_table: highest level (%d) has all bits set\n", i);
+	for (k=0; k<numBytes[i]; ++k) {
+		if ( m >= 8 ) {
+			/* handle full bytes */
+			*p++ = 0xff;
+			m -= 8;
+		}
+		else if(m>0) {
+			/* handle the last partial byte */
+			b = 0x80;
+			*p = 0;
+			while (m) {
+				*p |= b;
+				b >>= 1;
+				--m;
+			}
+		} else {
+			break;
+		}
+	}
+
+	return tbl;
+}
+
+/* Allocate a contiguous, power-of-2-sized range of TCEs.
+ * Locked front end for alloc_tce_range_nolock(); returns the first
+ * tce index of the range, or -1 on failure. */
+static inline long alloc_tce_range( struct TceTable *tbl, unsigned order )
+{
+	unsigned long irqflags;
+	long rc;
+
+	/* All bitmap manipulation happens under the table's lock. */
+	spin_lock_irqsave( &(tbl->lock), irqflags );
+	rc = alloc_tce_range_nolock( tbl, order );
+	spin_unlock_irqrestore( &(tbl->lock), irqflags );
+
+	return rc;
+}
+
+/* Allocate a contiguous range of 2^order TCEs.  Caller must hold
+ * tbl->lock.  Scans the free bitmap at the requested level; if no block
+ * is free there, recursively splits a block from the next level up and
+ * returns the unused half to the free map (buddy allocation).
+ * Returns the first tce index of the range, or -1 on failure. */
+static long alloc_tce_range_nolock( struct TceTable *tbl, unsigned order )
+{
+	unsigned long numBits, numBytes;
+	unsigned long i, bit, block, mask;
+	long tcenum;
+	u64 * map;
+
+	/* If the order (power of 2 size) requested is larger than our
+	 * biggest, indicate failure
+	 */
+	if(order >= NUM_TCE_LEVELS) {
+		PPCDBG(PPCDBG_TCE,
+		       "alloc_tce_range_nolock: invalid order: %d\n", order );
+		return -1;
+	}
+
+	numBits  = tbl->mlbm.level[order].numBits;
+	numBytes = tbl->mlbm.level[order].numBytes;
+	map = (u64 *)tbl->mlbm.level[order].map;
+
+	/* Initialize return value to -1 (failure) */
+	tcenum = -1;
+
+	/* Loop through the bytes of the bitmap (64 bits at a time;
+	 * a set bit marks a free block) */
+	for (i=0; i<numBytes/8; ++i) {
+		if ( *map ) {
+			/* A free block is found, compute the block
+			 * number (of this size)
+			 */
+			bit = count_leading_zeros64( *map );
+			block = (i * 64) + bit;
+
+			/* turn off the bit in the map to indicate
+			 * that the block is now in use
+			 */
+			mask = 0x1UL << (63 - bit);
+			*map &= ~mask;
+
+			/* compute the index into our tce table for
+			 * the first tce in the block
+			 */
+			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: allocating block %ld, (byte=%ld, bit=%ld) order %d\n", block, i, bit, order );
+			tcenum = block << order;
+			return tcenum;
+		}
+		++map;
+	}
+
+#ifdef DEBUG_TCE
+	if ( tcenum == -1 ) {
+		PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: no available blocks of order = %d\n", order );
+		if ( order < tbl->mlbm.maxLevel )
+			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: trying next bigger size\n" );
+		else
+			PPCDBG(PPCDBG_TCE, "alloc_tce_range_nolock: maximum size reached...failing\n");
+	}
+#endif
+
+	/* If no block of the requested size was found, try the next
+	 * size bigger.  If one of those is found, return the second
+	 * half of the block to freespace and keep the first half
+	 */
+	if((tcenum == -1) && (order < (NUM_TCE_LEVELS - 1))) {
+		tcenum = alloc_tce_range_nolock( tbl, order+1 );
+		if ( tcenum != -1 ) {
+			free_tce_range_nolock( tbl, tcenum+(1<<order), order );
+		}
+	}
+
+	/* Return the index of the first tce in the block
+	 * (or -1 if we failed)
+	 */
+	return tcenum;
+
+}
+
+/* Free a contiguous, power-of-2-sized range of TCEs.
+ * Locked front end for free_tce_range_nolock(). */
+static inline void free_tce_range(struct TceTable *tbl,
+				long tcenum, unsigned order )
+{
+	unsigned long irqflags;
+
+	/* All bitmap manipulation happens under the table's lock. */
+	spin_lock_irqsave( &(tbl->lock), irqflags );
+	free_tce_range_nolock( tbl, tcenum, order );
+	spin_unlock_irqrestore( &(tbl->lock), irqflags );
+}
+
+/* Free a contiguous range of 2^order TCEs starting at tcenum.  Caller
+ * must hold tbl->lock.  Sets the block's free bit at this level, then
+ * attempts to coalesce it with its buddy into one block at the next
+ * level up (recursively). */
+void free_tce_range_nolock(struct TceTable *tbl,
+			long tcenum, unsigned order )
+{
+	unsigned long block;
+	unsigned byte, bit, mask, b;
+	unsigned char  * map, * bytep;
+
+	if (order >= NUM_TCE_LEVELS) {
+		PPCDBG(PPCDBG_TCE,
+		       "free_tce_range: invalid order: %d, tcenum = %d\n",
+		       order, tcenum );
+		return;
+	}
+
+	block = tcenum >> order;
+
+#ifdef DEBUG_TCE
+	if ( tcenum != (block << order ) ) {
+		PPCDBG(PPCDBG_TCE,
+		       "free_tce_range: tcenum %lx misaligned for order %x\n",
+		       tcenum, order );
+		return;
+	}
+
+
+	if ( block >= tbl->mlbm.level[order].numBits ) {
+		PPCDBG(PPCDBG_TCE,
+		       "free_tce_range: tcenum %lx is outside the range of this map (order %x, numBits %lx\n",
+		       tcenum, order, tbl->mlbm.level[order].numBits );
+		return;
+	}
+
+
+	if ( test_tce_range( tbl, tcenum, order ) ) {
+		PPCDBG(PPCDBG_TCE,
+		       "free_tce_range: freeing range not allocated.\n");
+		PPCDBG(PPCDBG_TCE,
+		       "\tTceTable %p, tcenum %lx, order %x\n",
+		       tbl, tcenum, order );
+	}
+#endif
+
+	/* Locate the bit for this block and set it (1 == free). */
+	map = tbl->mlbm.level[order].map;
+	byte  = block / 8;
+	bit   = block % 8;
+	mask  = 0x80 >> bit;
+	bytep = map + byte;
+
+#ifdef DEBUG_TCE
+	PPCDBG(PPCDBG_TCE,
+	       "free_tce_range_nolock: freeing block %ld (byte=%d, bit=%d) of order %d\n",
+	       block, byte, bit, order);
+	if ( *bytep & mask )
+		PPCDBG(PPCDBG_TCE,
+		       "free_tce_range: already free: TceTable %p, tcenum %lx, order %x\n",
+		       tbl, tcenum, order );
+#endif
+
+	*bytep |= mask;
+
+	/* If there is a higher level in the bit map than this we may be
+	 * able to buddy up this block with its partner.
+	 *   If this is the highest level we can't buddy up
+	 *   If this level has an odd number of bits and
+	 *   we are freeing the last block we can't buddy up
+	 * Don't buddy up if it's in the first 1/4 of the level
+	 */
+	if (( block > (tbl->mlbm.level[order].numBits/4) ) &&
+	    (( block < tbl->mlbm.level[order].numBits-1 ) ||
+	     ( 0 == ( tbl->mlbm.level[order].numBits & 1)))) {
+		/* See if we can buddy up the block we just freed */
+		bit  &= 6;		/* get to the first of the buddy bits */
+		mask  = 0xc0 >> bit;	/* build two bit mask */
+		b     = *bytep & mask;	/* Get the two bits */
+		if ( 0 == (b ^ mask) ) { /* If both bits are on */
+			/* both of the buddy blocks are free we can combine them */
+			*bytep ^= mask;	/* turn off the two bits */
+			block = ( byte * 8 ) + bit; /* block of first of buddies */
+			tcenum = block << order;
+			/* free the buddied block */
+			PPCDBG(PPCDBG_TCE,
+			       "free_tce_range: buddying blocks %ld & %ld\n",
+			       block, block+1);
+			free_tce_range_nolock( tbl, tcenum, order+1 );
+		}
+	}
+}
+
+/* Debug check of the free bitmap for the block containing tcenum at the
+ * given order.  Returns 0 if the block is marked allocated, 1 if free,
+ * and 2 if the levels below disagree (an inconsistency). */
+static long test_tce_range( struct TceTable *tbl, long tcenum, unsigned order )
+{
+	unsigned long block;
+	unsigned byte, bit, mask, b;
+	long	retval, retLeft, retRight;
+	unsigned char  * map;
+
+	map = tbl->mlbm.level[order].map;
+	block = tcenum >> order;
+	byte = block / 8;		/* Byte within bitmap */
+	bit  = block % 8;		/* Bit within byte */
+	mask = 0x80 >> bit;
+	b    = (*(map+byte) & mask );	/* 0 if block is allocated, else free */
+	if ( b )
+		retval = 1;		/* 1 == block is free */
+	else
+		retval = 0;		/* 0 == block is allocated */
+	/* Test bits at all levels below this to ensure that all agree */
+
+	if (order) {
+		/* Recurse into both halves of this block. */
+		retLeft  = test_tce_range( tbl, tcenum, order-1 );
+		retRight = test_tce_range( tbl, tcenum+(1<<(order-1)), order-1 );
+		if ( retLeft || retRight ) {
+			retval = 2;
+		}
+	}
+
+	/* Test bits at all levels above this to ensure that all agree */
+
+	return retval;
+}
+
+/* Allocate a range of 2^order TCEs and map numPages pages starting at
+ * "page" into them via the platform's tce_build hook.  Returns the dma
+ * address of the first page, or NO_TCE on allocation failure. */
+static inline dma_addr_t get_tces( struct TceTable *tbl, unsigned order, void *page, unsigned numPages, int direction )
+{
+	long tcenum;
+	unsigned long uaddr;
+	unsigned i;
+	dma_addr_t retTce = NO_TCE;
+
+	/* Round the address down to a page boundary. */
+	uaddr = (unsigned long)page & PAGE_MASK;
+
+	/* Allocate a range of tces */
+	tcenum = alloc_tce_range( tbl, order );
+	if ( tcenum != -1 ) {
+		/* We got the tces we wanted */
+		tcenum += tbl->startOffset;	/* Offset into real TCE table */
+		retTce = tcenum << PAGE_SHIFT;	/* Set the return dma address */
+		/* Setup a tce for each page */
+		for (i=0; i<numPages; ++i) {
+			ppc_md.tce_build(tbl, tcenum, uaddr, direction);
+			++tcenum;
+			uaddr += PAGE_SIZE;
+		}
+	}
+	else
+		PPCDBG(PPCDBG_TCE, "alloc_tce_range failed\n");
+	return retTce;
+}
+
+/* Unmap numPages TCEs starting at dma_addr by writing zero entries via
+ * the iSeries hypervisor, then return the 2^order range to the free
+ * bitmap.  Bails out (without freeing) if the computed tce index is
+ * outside the table. */
+static void tce_free_iSeries(struct TceTable *tbl, dma_addr_t dma_addr,
+			     unsigned order, unsigned numPages)
+{
+	u64 setTceRc;
+	long tcenum, freeTce, maxTcenum;
+	unsigned i;
+	union Tce tce;
+
+	maxTcenum = (tbl->size * (PAGE_SIZE / sizeof(union Tce))) - 1;
+
+	tcenum = dma_addr >> PAGE_SHIFT;
+
+	/* Bitmap index: subtract the table's offset into the real table. */
+	freeTce = tcenum - tbl->startOffset;
+
+	if ( freeTce > maxTcenum ) {
+		PPCDBG(PPCDBG_TCE, "free_tces: tcenum > maxTcenum\n");
+		PPCDBG(PPCDBG_TCE, "\ttcenum    = 0x%lx\n", tcenum);
+		PPCDBG(PPCDBG_TCE, "\tmaxTcenum = 0x%lx\n", maxTcenum);
+		PPCDBG(PPCDBG_TCE, "\tTCE Table = 0x%lx\n", (u64)tbl);
+		PPCDBG(PPCDBG_TCE, "\tbus#      = 0x%lx\n", (u64)tbl->busNumber );
+		PPCDBG(PPCDBG_TCE, "\tsize      = 0x%lx\n", (u64)tbl->size);
+		PPCDBG(PPCDBG_TCE, "\tstartOff  = 0x%lx\n", (u64)tbl->startOffset );
+		PPCDBG(PPCDBG_TCE, "\tindex     = 0x%lx\n", (u64)tbl->index);
+		return;
+	}
+
+	for (i=0; i<numPages; ++i) {
+		/* A zero TCE invalidates the entry. */
+		tce.wholeTce = 0;
+		setTceRc = HvCallXm_setTce((u64)tbl->index,
+					   (u64)tcenum,
+					   tce.wholeTce );
+
+		if ( setTceRc ) {
+			printk("PCI: tce_free failed 0x%lx tcenum: 0x%lx\n", setTceRc, (u64)tcenum);
+			//PPCDBG(PPCDBG_TCE, "tce_free: setTce failed\n");
+			//PPCDBG(PPCDBG_TCE, "\trc      = 0x%lx\n", setTceRc);
+			//PPCDBG(PPCDBG_TCE, "\tindex   = 0x%lx\n", (u64)tbl->index);
+			//PPCDBG(PPCDBG_TCE, "\ttce num = 0x%lx\n", (u64)tcenum);
+			//PPCDBG(PPCDBG_TCE, "\ttce val = 0x%lx\n", tce.wholeTce );
+		}
+
+		++tcenum;
+	}
+
+	free_tce_range( tbl, freeTce, order );
+}
+
+/* Unmap numPages TCEs starting at dma_addr by writing zero entries
+ * directly into the in-memory table, then return the 2^order range to
+ * the free bitmap.  Bails out (without freeing) if the computed tce
+ * index is outside the table. */
+static void tce_free_pSeries(struct TceTable *tbl, dma_addr_t dma_addr,
+			     unsigned order, unsigned numPages)
+{
+	long tcenum, freeTce, maxTcenum;
+	unsigned i;
+	union Tce tce;
+	union Tce *tce_addr;
+
+	maxTcenum = (tbl->size * (PAGE_SIZE / sizeof(union Tce))) - 1;
+
+	tcenum = dma_addr >> PAGE_SHIFT;
+	// tcenum -= tbl->startOffset;
+
+	/* Bitmap index: subtract the table's offset into the real table. */
+	freeTce = tcenum - tbl->startOffset;
+
+	if ( freeTce > maxTcenum ) {
+		PPCDBG(PPCDBG_TCE, "free_tces: tcenum > maxTcenum\n");
+		PPCDBG(PPCDBG_TCE, "\ttcenum    = 0x%lx\n", tcenum);
+		PPCDBG(PPCDBG_TCE, "\tmaxTcenum = 0x%lx\n", maxTcenum);
+		PPCDBG(PPCDBG_TCE, "\tTCE Table = 0x%lx\n", (u64)tbl);
+		PPCDBG(PPCDBG_TCE, "\tbus#      = 0x%lx\n",
+		       (u64)tbl->busNumber );
+		PPCDBG(PPCDBG_TCE, "\tsize      = 0x%lx\n", (u64)tbl->size);
+		PPCDBG(PPCDBG_TCE, "\tstartOff  = 0x%lx\n",
+		       (u64)tbl->startOffset );
+		PPCDBG(PPCDBG_TCE, "\tindex     = 0x%lx\n", (u64)tbl->index);
+		return;
+	}
+
+	for (i=0; i<numPages; ++i) {
+		/* A zero TCE invalidates the entry. */
+		tce.wholeTce = 0;
+
+		tce_addr  = ((union Tce *)tbl->base) + tcenum;
+		*tce_addr = (union Tce)tce.wholeTce;
+
+		++tcenum;
+	}
+
+	/* Make sure the update is visible to hardware. */
+	__asm__ __volatile__ ("sync" : : : "memory");
+
+	free_tce_range( tbl, freeTce, order );
+}
+
+/* Create the TCE tables for the iSeries virtual bus (bus 255): query
+ * the hypervisor for the bus's TCE space, split it between virtual
+ * ethernet (first half) and virtual I/O (remainder), and build the
+ * allocation bitmaps for each.  Only the VETH table is registered in
+ * tceTables[255]; the VIO table is reached via its fake device_node. */
+void __init create_virtual_bus_tce_table(void)
+{
+	struct TceTable *t;
+	struct TceTableManagerCB virtBusTceTableParms;
+	u64 absParmsPtr;
+
+	virtBusTceTableParms.busNumber = 255;	/* Bus 255 is the virtual bus */
+	virtBusTceTableParms.virtualBusFlag = 0xff; /* Ask for virtual bus */
+
+	/* The hypervisor needs an absolute (real) address for the parms. */
+	absParmsPtr = virt_to_absolute( (u64)&virtBusTceTableParms );
+	HvCallXm_getTceTableParms( absParmsPtr );
+
+	virtBusVethTceTable.size = virtBusTceTableParms.size / 2;
+	virtBusVethTceTable.busNumber = virtBusTceTableParms.busNumber;
+	virtBusVethTceTable.startOffset = virtBusTceTableParms.startOffset;
+	virtBusVethTceTable.index = virtBusTceTableParms.index;
+	virtBusVethTceTable.tceType = TCE_VB;
+
+	/* VIO takes what VETH left, starting right after VETH's entries. */
+	virtBusVioTceTable.size = virtBusTceTableParms.size - virtBusVethTceTable.size;
+	virtBusVioTceTable.busNumber = virtBusTceTableParms.busNumber;
+	virtBusVioTceTable.startOffset = virtBusTceTableParms.startOffset +
+			virtBusVethTceTable.size * (PAGE_SIZE/sizeof(union Tce));
+	virtBusVioTceTable.index = virtBusTceTableParms.index;
+	virtBusVioTceTable.tceType = TCE_VB;
+
+	t = build_tce_table( &virtBusVethTceTable );
+	if ( t ) {
+		tceTables[255] = t;
+		printk( "Virtual Bus VETH TCE table built successfully.\n");
+		printk( "  TCE table size = %ld entries\n",
+		        (unsigned long)t->size*(PAGE_SIZE/sizeof(union Tce)) );
+		printk( "  TCE table token = %d\n",
+		        (unsigned)t->index );
+		printk( "  TCE table start entry = 0x%lx\n",
+		        (unsigned long)t->startOffset );
+	}
+	else
+		printk( "Virtual Bus VETH TCE table failed.\n");
+
+	t = build_tce_table( &virtBusVioTceTable );
+	if ( t ) {
+		printk( "Virtual Bus VIO TCE table built successfully.\n");
+		printk( "  TCE table size = %ld entries\n",
+		        (unsigned long)t->size*(PAGE_SIZE/sizeof(union Tce)) );
+		printk( "  TCE table token = %d\n",
+		        (unsigned)t->index );
+		printk( "  TCE table start entry = 0x%lx\n",
+		        (unsigned long)t->startOffset );
+	}
+	else
+		printk( "Virtual Bus VIO TCE table failed.\n");
+}
+
+/* Build TCE tables for every device node under every PHB (non-LPAR
+ * pSeries).  The 4GB of TCE-mappable space (2^22 entries of 4K pages,
+ * halved to 2GB per the comment below) is carved evenly among the slots
+ * under each PHB, rounding the slot count up to a power of two.
+ * NOTE(review): the bus_list parameter is unused -- iteration is over the
+ * global hose_head PHB list instead; confirm that is intentional.
+ */
+void create_tce_tables_for_buses(struct list_head *bus_list)
+{
+ struct pci_controller* phb;
+ struct device_node *dn, *first_dn;
+ int num_slots, num_slots_ilog2;
+ int first_phb = 1;
+
+ for (phb=hose_head;phb;phb=phb->next) {
+ first_dn = ((struct device_node *)phb->arch_data)->child;
+ /* Carve 2GB into the largest dma_window_size possible */
+ for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling)
+ num_slots++;
+ /* Round num_slots up to the next power of two so the division
+ * below is an exact shift.
+ */
+ num_slots_ilog2 = __ilog2(num_slots);
+ if ((1<<num_slots_ilog2) != num_slots)
+ num_slots_ilog2++;
+ phb->dma_window_size = 1 << (22 - num_slots_ilog2);
+ /* Reserve 16MB of DMA space on the first PHB.
+ * We should probably be more careful and use firmware props.
+ * In reality this space is remapped, not lost. But we don't
+ * want to get that smart to handle it -- too much work.
+ */
+ phb->dma_window_base_cur = first_phb ? (1 << 12) : 0;
+ first_phb = 0;
+ for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling) {
+ create_pci_bus_tce_table((unsigned long)dn);
+ }
+ }
+}
+
+/* Build TCE tables for an LPAR pSeries system.  Walks the given list of
+ * pci_bus structures; a bus that carries an "ibm,dma-window" property gets
+ * its own TCE table, otherwise we recurse into its child buses.
+ */
+void create_tce_tables_for_busesLP(struct list_head *bus_list)
+{
+ struct list_head *ln;
+ struct pci_bus *bus;
+ struct device_node *busdn;
+ u32 *dma_window;
+ for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
+ bus = pci_bus_b(ln);
+ busdn = PCI_GET_DN(bus);
+ /* NOTE: there should never be a window declared on a bus when
+ * child devices also have a window. If this should ever be
+ * architected, we probably want children to have priority.
+ * In reality, the PHB containing ISA has the property, but otherwise
+ * it is the pci-bridges that have the property.
+ */
+ dma_window = (u32 *)get_property(busdn, "ibm,dma-window", 0);
+ if (dma_window) {
+ /* Busno hasn't been copied yet.
+ * Do it now because getTceTableParmsPSeriesLP needs it.
+ */
+ busdn->busno = bus->number;
+ create_pci_bus_tce_table((unsigned long)busdn);
+ } else
+ /* No window at this level: descend into child buses. */
+ create_tce_tables_for_busesLP(&bus->children);
+ }
+}
+
+/* Top-level entry: build all PCI TCE tables (LPAR or classic pSeries),
+ * then cache each device's nearest ancestor tce_table pointer directly on
+ * its device_node so later lookups avoid walking up the tree.
+ */
+void create_tce_tables(void) {
+ struct pci_dev *dev;
+ struct device_node *dn, *mydn;
+
+ if (_machine == _MACH_pSeriesLP)
+ create_tce_tables_for_busesLP(&pci_root_buses);
+ else
+ create_tce_tables_for_buses(&pci_root_buses);
+
+ /* Now copy the tce_table ptr from the bus devices down to every
+ * pci device_node. This means get_tce_table() won't need to search
+ * up the device tree to find it.
+ */
+ pci_for_each_dev(dev) {
+ mydn = dn = PCI_GET_DN(dev);
+ /* Climb until an ancestor with a table is found (or the root). */
+ while (dn && dn->tce_table == NULL)
+ dn = dn->parent;
+ if (dn) {
+ mydn->tce_table = dn->tce_table;
+ }
+ }
+}
+
+/*
+ * create_pci_bus_tce_table
+ *
+ * iSeries token = busNumber
+ * pSeries token = pci_controller*
+ *
+ * Build (or share) the TCE table for one PCI bus.  On iSeries the
+ * hypervisor supplies the table geometry and hardware tables may be
+ * shared between buses (matched by index/startOffset); on pSeries the
+ * parameters come from the device tree / firmware.
+ */
+void create_pci_bus_tce_table( unsigned long token ) {
+ struct TceTable * builtTceTable;
+ struct TceTable * newTceTable;
+ struct TceTableManagerCB pciBusTceTableParms;
+ u64 parmsPtr;
+ int BusIndex; /* hoisted from mid-block for C89 conformance */
+
+ PPCDBG(PPCDBG_TCE, "Entering create_pci_bus_tce_table.\n");
+ PPCDBG(PPCDBG_TCE, "\ttoken = 0x%lx\n", token);
+
+ newTceTable = kmalloc( sizeof(struct TceTable), GFP_KERNEL );
+ /* kmalloc can fail; the original dereferenced this unchecked. */
+ if ( newTceTable == NULL ) {
+ printk("PCI: Bus TCE table failed, no memory\n");
+ return;
+ }
+
+ if(_machine == _MACH_iSeries) {
+ if ( token > 254 ) {
+ printk("PCI: Bus TCE table failed, invalid bus number %lu\n", token );
+ kfree( newTceTable ); /* was leaked on this early return */
+ return;
+ }
+
+ pciBusTceTableParms.busNumber = token;
+ pciBusTceTableParms.virtualBusFlag = 0;
+ parmsPtr = virt_to_absolute( (u64)&pciBusTceTableParms );
+
+ /*
+ * Call HV with the architected data structure to get TCE table
+ * info. Put the returned data into the Linux representation
+ * of the TCE table data.
+ */
+ HvCallXm_getTceTableParms( parmsPtr );
+ printk("PCI: getTceTableParms: Bus: 0x%lx Size: 0x%lx, Start: 0x%lx, Index: 0x%lx\n",
+ pciBusTceTableParms.busNumber,
+ pciBusTceTableParms.size,
+ pciBusTceTableParms.startOffset,
+ pciBusTceTableParms.index);
+
+ /* Determine if the table identified by the index and startOffset */
+ /* returned by the hypervisor for this bus has already been created. */
+ /* If so, set the tceTable entry to point to the linux shared tceTable.*/
+ for ( BusIndex=0; BusIndex<255; ++BusIndex) {
+ if (tceTables[BusIndex] != NULL) {
+ struct TceTable* CmprTceTable = tceTables[BusIndex];
+ if ( ( CmprTceTable->index == pciBusTceTableParms.index ) &&
+ ( CmprTceTable->startOffset == pciBusTceTableParms.startOffset ) ) {
+ tceTables[token] = CmprTceTable;
+ printk("PCI: Bus %lu Shares a TCE table with bus %d\n",token,BusIndex);
+ break;
+ }
+ }
+ }
+ /* No shared table, build a new table for this bus. */
+ if (tceTables[token] == NULL) {
+ newTceTable->size = pciBusTceTableParms.size;
+ newTceTable->busNumber = pciBusTceTableParms.busNumber;
+ newTceTable->startOffset = pciBusTceTableParms.startOffset;
+ newTceTable->index = pciBusTceTableParms.index;
+
+ builtTceTable = build_tce_table( newTceTable );
+ /* build_tce_table can fail; the original dereferenced the
+ * result unconditionally.
+ */
+ if ( builtTceTable != NULL ) {
+ builtTceTable->tceType = TCE_PCI;
+ tceTables[token] = builtTceTable;
+ } else {
+ kfree( newTceTable );
+ PPCDBG(PPCDBG_TCE, "PCI Bus TCE table failed.\n");
+ }
+ }
+ else {
+ /* We're using the shared table, not this new one. */
+ kfree(newTceTable);
+ }
+
+ printk("PCI: Pci bus %lu TceTable: %p\n",token,tceTables[token]);
+ return;
+ } else {
+ struct device_node *dn;
+ struct pci_controller *phb;
+
+ dn = (struct device_node *)token;
+ phb = dn->phb;
+ if (_machine == _MACH_pSeries)
+ getTceTableParmsPSeries(phb, dn, newTceTable);
+ else
+ getTceTableParmsPSeriesLP(phb, dn, newTceTable);
+ builtTceTable = build_tce_table( newTceTable );
+ dn->tce_table = builtTceTable;
+ }
+
+ if(builtTceTable == NULL ) {
+ kfree( newTceTable );
+ PPCDBG(PPCDBG_TCE, "PCI Bus TCE table failed.\n");
+ return;
+ }
+}
+
+/* Fill in *newTceTable for a classic (non-LPAR) pSeries bus by locating
+ * the firmware-built table for this PHB in of_tce_table[], zeroing it,
+ * and carving the next chunk of the PHB's DMA window for this device.
+ * Units: startOffset is in TCE entries; size is in pages of TCE table.
+ */
+static void getTceTableParmsPSeries(struct pci_controller *phb,
+ struct device_node *dn,
+ struct TceTable *newTceTable ) {
+ phandle node;
+ unsigned long i;
+
+ node = ((struct device_node *)(phb->arch_data))->node;
+
+ PPCDBG(PPCDBG_TCEINIT, "getTceTableParms: start\n");
+ PPCDBG(PPCDBG_TCEINIT, "\tof_tce_table = 0x%lx\n", of_tce_table);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb = 0x%lx\n", phb);
+ PPCDBG(PPCDBG_TCEINIT, "\tdn = 0x%lx\n", dn);
+ PPCDBG(PPCDBG_TCEINIT, "\tdn->name = %s\n", dn->name);
+ PPCDBG(PPCDBG_TCEINIT, "\tdn->full_name= %s\n", dn->full_name);
+ PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable = 0x%lx\n", newTceTable);
+ PPCDBG(PPCDBG_TCEINIT, "\tdma_window_size = 0x%lx\n", phb->dma_window_size);
+
+ /* Scan the NULL-terminated of_tce_table[] for this PHB's node. */
+ i = 0;
+ while(of_tce_table[i].node) {
+ PPCDBG(PPCDBG_TCEINIT, "\tof_tce_table[%d].node = 0x%lx\n",
+ i, of_tce_table[i].node);
+ PPCDBG(PPCDBG_TCEINIT, "\tof_tce_table[%d].base = 0x%lx\n",
+ i, of_tce_table[i].base);
+ PPCDBG(PPCDBG_TCEINIT, "\tof_tce_table[%d].size = 0x%lx\n",
+ i, of_tce_table[i].size >> PAGE_SHIFT);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb->arch_data->node = 0x%lx\n",
+ node);
+
+ if(of_tce_table[i].node == node) {
+ /* Start from a clean table: no stale mappings. */
+ memset((void *)of_tce_table[i].base,
+ 0, of_tce_table[i].size);
+ newTceTable->busNumber = phb->bus->number;
+
+ /* Units of tce entries. */
+ newTceTable->startOffset = phb->dma_window_base_cur;
+
+ /* Adjust the current table offset to the next */
+ /* region. Measured in TCE entries. Force an */
+ /* alignment to the size alloted per IOA. This */
+ /* makes it easier to remove the 1st 16MB. */
+ phb->dma_window_base_cur += (phb->dma_window_size>>3);
+ phb->dma_window_base_cur &=
+ ~((phb->dma_window_size>>3)-1);
+
+ /* Set the tce table size - measured in units */
+ /* of pages of tce table. */
+ newTceTable->size = ((phb->dma_window_base_cur -
+ newTceTable->startOffset) << 3)
+ >> PAGE_SHIFT;
+
+ /* Test if we are going over 2GB of DMA space. */
+ /* 1<<19 TCE entries * 4K pages = 2GB.         */
+ if(phb->dma_window_base_cur > (1 << 19)) {
+ udbg_printf("Unexpected number of IOAs under this PHB");
+ panic("Unexpected number of IOAs under this PHB");
+ }
+
+ newTceTable->base = of_tce_table[i].base;
+ newTceTable->index = 0;
+
+ PPCDBG(PPCDBG_TCEINIT,
+ "\tnewTceTable->base = 0x%lx\n",
+ newTceTable->base);
+ PPCDBG(PPCDBG_TCEINIT,
+ "\tnewTceTable->startOffset = 0x%lx"
+ "(# tce entries)\n",
+ newTceTable->startOffset);
+ PPCDBG(PPCDBG_TCEINIT,
+ "\tnewTceTable->size = 0x%lx"
+ "(# pages of tce table)\n",
+ newTceTable->size);
+ }
+ i++;
+ }
+}
+
+/*
+ * getTceTableParmsPSeriesLP
+ *
+ * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
+ *
+ * ToDo: properly interpret the ibm,dma-window property. The definition is:
+ * logical-bus-number (1 word)
+ * phys-address (#address-cells words)
+ * size (#cell-size words)
+ *
+ * Currently we hard code these sizes (more or less).
+ */
+static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
+ struct device_node *dn,
+ struct TceTable *newTceTable ) {
+ u32 *dma_window = (u32 *)get_property(dn, "ibm,dma-window", 0);
+ if (!dma_window) {
+ panic("getTceTableParmsPSeriesLP: device %s has no ibm,dma-window property!\n", dn->full_name);
+ }
+
+ newTceTable->busNumber = dn->busno;
+ /* Hard-coded layout (see ToDo above): words 2-3 = phys-address,
+ * words 4-5 = window size.  size is converted from bytes to pages of
+ * TCE table; startOffset from bytes to TCE entries.
+ */
+ newTceTable->size = (((((unsigned long)dma_window[4] << 32) | (unsigned long)dma_window[5]) >> PAGE_SHIFT) << 3) >> PAGE_SHIFT;
+ newTceTable->startOffset = ((((unsigned long)dma_window[2] << 32) | (unsigned long)dma_window[3]) >> 12);
+ /* No kernel-mapped table on LPAR: the hypervisor owns the real table. */
+ newTceTable->base = 0;
+ newTceTable->index = dma_window[0];
+ PPCDBG(PPCDBG_TCEINIT, "getTceTableParmsPSeriesLP for bus 0x%lx:\n", dn->busno);
+ PPCDBG(PPCDBG_TCEINIT, "\tDevice = %s\n", dn->full_name);
+ PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->index = 0x%lx\n", newTceTable->index);
+ PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->startOffset = 0x%lx\n", newTceTable->startOffset);
+ PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->size = 0x%lx\n", newTceTable->size);
+}
+
+/* Allocates a contiguous real buffer and creates TCEs over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (tce) of the first page.  On any failure the
+ * return value is NULL and *dma_handle is NO_TCE.
+ */
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ struct TceTable * tbl;
+ void *ret = NULL;
+ unsigned order, nPages;
+ dma_addr_t tce;
+
+ PPCDBG(PPCDBG_TCE, "pci_alloc_consistent:\n");
+ PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx\n", hwdev);
+ PPCDBG(PPCDBG_TCE, "\tsize = 0x%16.16lx\n", size);
+ PPCDBG(PPCDBG_TCE, "\tdma_handle = 0x%16.16lx\n", dma_handle);
+
+ /* Initialize the out-parameter up front: the failure paths below
+ * previously left *dma_handle uninitialized, and the trace at the
+ * bottom of this function reads it unconditionally.
+ */
+ *dma_handle = NO_TCE;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+ nPages = 1 << order;
+
+ tbl = get_tce_table(hwdev);
+
+ if ( tbl ) {
+ /* Alloc enough pages (and possibly more) */
+ ret = (void *)__get_free_pages( GFP_ATOMIC, order );
+ if ( ret ) {
+ /* Page allocation succeeded */
+ memset(ret, 0, nPages << PAGE_SHIFT);
+ /* Set up tces to cover the allocated range */
+ tce = get_tces( tbl, order, ret, nPages, PCI_DMA_BIDIRECTIONAL );
+ if ( tce == NO_TCE ) {
+ PPCDBG(PPCDBG_TCE, "pci_alloc_consistent: get_tces failed\n" );
+ free_pages( (unsigned long)ret, order );
+ ret = NULL;
+ }
+ else
+ {
+ *dma_handle = tce;
+ }
+ }
+ else PPCDBG(PPCDBG_TCE, "pci_alloc_consistent: __get_free_pages failed for order = %d\n", order);
+ }
+ else PPCDBG(PPCDBG_TCE, "pci_alloc_consistent: get_tce_table failed for 0x%016lx\n", hwdev);
+
+ PPCDBG(PPCDBG_TCE, "\tpci_alloc_consistent: dma_handle = 0x%16.16lx\n", *dma_handle);
+ PPCDBG(PPCDBG_TCE, "\tpci_alloc_consistent: return = 0x%16.16lx\n", ret);
+ return ret;
+}
+
+/* Release a buffer obtained from pci_alloc_consistent(): tear down the
+ * TCEs covering it, then free the underlying pages.  If no TCE table can
+ * be found for the device, nothing is freed (best-effort, as allocated).
+ */
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct TceTable * tbl;
+ unsigned order, nPages;
+
+ PPCDBG(PPCDBG_TCE, "pci_free_consistent:\n");
+ PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, size = 0x%16.16lx, dma_handle = 0x%16.16lx, vaddr = 0x%16.16lx\n", hwdev, size, dma_handle, vaddr);
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+ nPages = 1 << order;
+
+ /* Unusually large frees are worth tracing. */
+ if ( order > 10 )
+ PPCDBG(PPCDBG_TCE, "pci_free_consistent: order=%d, size=%d, nPages=%d, dma_handle=%016lx, vaddr=%016lx\n",
+ order, size, nPages, (unsigned long)dma_handle, (unsigned long)vaddr );
+
+ tbl = get_tce_table(hwdev);
+
+ if ( tbl ) {
+ ppc_md.tce_free(tbl, dma_handle, order, nPages);
+ free_pages( (unsigned long)vaddr, order );
+ }
+}
+
+/* Creates TCEs for a user provided buffer. The user buffer must be
+ * contiguous real kernel storage (not vmalloc). The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer. The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ * Returns NO_TCE if no TCE table exists for the device.
+ */
+dma_addr_t pci_map_single(struct pci_dev *hwdev, void *vaddr,
+ size_t size, int direction )
+{
+ struct TceTable * tbl;
+ dma_addr_t dma_handle = NO_TCE;
+ unsigned long uaddr;
+ unsigned order, nPages;
+
+ PPCDBG(PPCDBG_TCE, "pci_map_single:\n");
+ PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, size = 0x%16.16lx, direction = 0x%16.16lx, vaddr = 0x%16.16lx\n", hwdev, size, direction, vaddr);
+ if ( direction == PCI_DMA_NONE )
+ BUG();
+
+ uaddr = (unsigned long)vaddr;
+ /* nPages first holds the page-aligned byte span of the buffer;
+ * it only becomes a page count after the >>= PAGE_SHIFT below.
+ */
+ nPages = PAGE_ALIGN( uaddr + size ) - ( uaddr & PAGE_MASK );
+ order = get_order( nPages & PAGE_MASK );
+ nPages >>= PAGE_SHIFT;
+
+ tbl = get_tce_table(hwdev);
+
+ if ( tbl ) {
+ dma_handle = get_tces( tbl, order, vaddr, nPages, direction );
+ /* Re-apply the intra-page offset of vaddr. */
+ dma_handle |= ( uaddr & ~PAGE_MASK );
+ }
+
+ return dma_handle;
+}
+
+/* Undo a pci_map_single(): free the TCEs covering the mapped range.
+ * dma_handle may carry an intra-page offset; the span is recomputed the
+ * same way pci_map_single computed it.
+ */
+void pci_unmap_single( struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction )
+{
+ struct TceTable * tbl;
+ unsigned order, nPages;
+
+ PPCDBG(PPCDBG_TCE, "pci_unmap_single:\n");
+ PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, size = 0x%16.16lx, direction = 0x%16.16lx, dma_handle = 0x%16.16lx\n", hwdev, size, direction, dma_handle);
+ if ( direction == PCI_DMA_NONE )
+ BUG();
+
+ /* nPages holds bytes until the >>= PAGE_SHIFT below. */
+ nPages = PAGE_ALIGN( dma_handle + size ) - ( dma_handle & PAGE_MASK );
+ order = get_order( nPages & PAGE_MASK );
+ nPages >>= PAGE_SHIFT;
+
+ if ( order > 10 )
+ PPCDBG(PPCDBG_TCE, "pci_unmap_single: order=%d, size=%d, nPages=%d, dma_handle=%016lx\n",
+ order, size, nPages, (unsigned long)dma_handle );
+
+ tbl = get_tce_table(hwdev);
+
+ if ( tbl )
+ ppc_md.tce_free(tbl, dma_handle, order, nPages);
+
+}
+
+#if 0
+/* Figure out how many TCEs are actually going to be required
+ * to map this scatterlist. This code is not optimal. It
+ * takes into account the case where entry n ends in the same
+ * page in which entry n+1 starts. It does not handle the
+ * general case of entry n ending in the same page in which
+ * entry m starts.
+ * (Dead code: inside #if 0.)
+ */
+static unsigned long num_tces_sg( struct scatterlist *sg, int nents )
+{
+ unsigned long nTces, numPages, startPage, endPage, prevEndPage;
+ unsigned i;
+
+ prevEndPage = 0;
+ nTces = 0;
+
+ for (i=0; i<nents; ++i) {
+ /* Compute the starting page number and
+ * the ending page number for this entry
+ */
+ startPage = (unsigned long)sg->address >> PAGE_SHIFT;
+ endPage = ((unsigned long)sg->address + sg->length - 1) >> PAGE_SHIFT;
+ numPages = endPage - startPage + 1;
+ /* Simple optimization: if the previous entry ended
+ * in the same page in which this entry starts
+ * then we can reduce the required pages by one.
+ * This matches assumptions in fill_scatterlist_sg and
+ * create_tces_sg
+ */
+ if ( startPage == prevEndPage )
+ --numPages;
+ nTces += numPages;
+ prevEndPage = endPage;
+ sg++;
+ }
+ return nTces;
+}
+
+/* Fill in the dma data in the scatterlist
+ * return the number of dma sg entries created
+ *
+ * Coalesces virtually-adjacent (or page-boundary-adjacent) entries into
+ * single dma segments.  Must agree with the page-sharing assumptions of
+ * num_tces_sg and create_tces_sg.  (Dead code: inside #if 0.)
+ */
+static unsigned fill_scatterlist_sg( struct scatterlist *sg, int nents,
+ dma_addr_t dma_addr , unsigned long numTces)
+{
+ struct scatterlist *dma_sg;
+ u32 cur_start_dma;
+ unsigned long cur_len_dma, cur_end_virt, uaddr;
+ unsigned num_dma_ents;
+
+ dma_sg = sg;
+ num_dma_ents = 1;
+
+ /* Process the first sg entry */
+ cur_start_dma = dma_addr + ((unsigned long)sg->address & (~PAGE_MASK));
+ cur_len_dma = sg->length;
+ /* cur_end_virt holds the address of the byte immediately after the
+ * end of the current buffer.
+ */
+ cur_end_virt = (unsigned long)sg->address + cur_len_dma;
+ /* Later code assumes that unused sg->dma_address and sg->dma_length
+ * fields will be zero. Other archs seem to assume that the user
+ * (device driver) guarantees that...I don't want to depend on that
+ */
+ sg->dma_address = sg->dma_length = 0;
+
+ /* Process the rest of the sg entries */
+ while (--nents) {
+ ++sg;
+ /* Clear possibly unused fields. Note: sg >= dma_sg so
+ * this can't be clearing a field we've already set
+ */
+ sg->dma_address = sg->dma_length = 0;
+
+ /* Check if it is possible to make this next entry
+ * contiguous (in dma space) with the previous entry.
+ */
+
+ /* The entries can be contiguous in dma space if
+ * the previous entry ends immediately before the
+ * start of the current entry (in virtual space)
+ * or if the previous entry ends at a page boundary
+ * and the current entry starts at a page boundary.
+ */
+ uaddr = (unsigned long)sg->address;
+ if ( ( uaddr != cur_end_virt ) &&
+ ( ( ( uaddr | cur_end_virt ) & (~PAGE_MASK) ) ||
+ ( ( uaddr & PAGE_MASK ) == ( ( cur_end_virt-1 ) & PAGE_MASK ) ) ) ) {
+ /* This entry can not be contiguous in dma space.
+ * save the previous dma entry and start a new one
+ */
+ dma_sg->dma_address = cur_start_dma;
+ dma_sg->dma_length = cur_len_dma;
+
+ ++dma_sg;
+ ++num_dma_ents;
+
+ cur_start_dma += cur_len_dma-1;
+ /* If the previous entry ends and this entry starts
+ * in the same page then they share a tce. In that
+ * case don't bump cur_start_dma to the next page
+ * in dma space. This matches assumptions made in
+ * num_tces_sg and create_tces_sg.
+ */
+ if ((uaddr & PAGE_MASK) == ((cur_end_virt-1) & PAGE_MASK))
+ cur_start_dma &= PAGE_MASK;
+ else
+ cur_start_dma = PAGE_ALIGN(cur_start_dma+1);
+ cur_start_dma += ( uaddr & (~PAGE_MASK) );
+ cur_len_dma = 0;
+ }
+ /* Accumulate the length of this entry for the next
+ * dma entry
+ */
+ cur_len_dma += sg->length;
+ cur_end_virt = uaddr + sg->length;
+ }
+ /* Fill in the last dma entry */
+ dma_sg->dma_address = cur_start_dma;
+ dma_sg->dma_length = cur_len_dma;
+
+ /* Consistency check: the dma span we produced should use exactly
+ * the number of TCEs num_tces_sg predicted.
+ */
+ if ((((cur_start_dma +cur_len_dma - 1)>> PAGE_SHIFT) - (dma_addr >> PAGE_SHIFT) + 1) != numTces)
+ {
+ PPCDBG(PPCDBG_TCE, "fill_scatterlist_sg: numTces %ld, used tces %d\n",
+ numTces,
+ (unsigned)(((cur_start_dma + cur_len_dma - 1) >> PAGE_SHIFT) - (dma_addr >> PAGE_SHIFT) + 1));
+ }
+
+
+ return num_dma_ents;
+}
+
+/* Call the hypervisor to create the TCE entries.
+ * return the number of TCEs created
+ * On success the returned dma address is the start of the allocated TCE
+ * range; NO_TCE on allocation failure.  (Dead code: inside #if 0.)
+ */
+static dma_addr_t create_tces_sg( struct TceTable *tbl, struct scatterlist *sg,
+ int nents, unsigned numTces, int direction )
+{
+ unsigned order, i, j;
+ unsigned long startPage, endPage, prevEndPage, numPages, uaddr;
+ long tcenum, starttcenum;
+ dma_addr_t dmaAddr;
+
+ dmaAddr = NO_TCE;
+
+ order = get_order( numTces << PAGE_SHIFT );
+ /* allocate a block of tces */
+ tcenum = alloc_tce_range( tbl, order );
+ if ( tcenum != -1 ) {
+ tcenum += tbl->startOffset;
+ starttcenum = tcenum;
+ dmaAddr = tcenum << PAGE_SHIFT;
+ prevEndPage = 0;
+ for (j=0; j<nents; ++j) {
+ startPage = (unsigned long)sg->address >> PAGE_SHIFT;
+ endPage = ((unsigned long)sg->address + sg->length - 1) >> PAGE_SHIFT;
+ numPages = endPage - startPage + 1;
+
+ uaddr = (unsigned long)sg->address;
+
+ /* If the previous entry ended in the same page that
+ * the current page starts then they share that
+ * tce and we reduce the number of tces we need
+ * by one. This matches assumptions made in
+ * num_tces_sg and fill_scatterlist_sg
+ */
+ if ( startPage == prevEndPage ) {
+ --numPages;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* One TCE per page of this entry. */
+ for (i=0; i<numPages; ++i) {
+ ppc_md.tce_build(tbl, tcenum, uaddr, direction);
+ ++tcenum;
+ uaddr += PAGE_SIZE;
+ }
+
+ prevEndPage = endPage;
+ sg++;
+ }
+ /* Consistency check against num_tces_sg's prediction. */
+ if ((tcenum - starttcenum) != numTces)
+ PPCDBG(PPCDBG_TCE, "create_tces_sg: numTces %d, tces used %d\n",
+ numTces, (unsigned)(tcenum - starttcenum));
+
+ }
+
+ return dmaAddr;
+}
+
+/* Map a scatterlist for DMA (coalescing version; dead code: inside #if 0).
+ * Returns the number of dma segments produced, or 0 if the device has no
+ * TCE table.
+ */
+int pci_map_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction )
+{
+ struct TceTable * tbl;
+ unsigned numTces;
+ int num_dma = 0; /* was returned uninitialized when no TCE table */
+ dma_addr_t dma_handle;
+
+ PPCDBG(PPCDBG_TCE, "pci_map_sg:\n");
+ PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, sg = 0x%16.16lx, direction = 0x%16.16lx, nents = 0x%16.16lx\n", hwdev, sg, direction, nents);
+ /* Fast path for a single entry scatterlist */
+ if ( nents == 1 ) {
+ sg->dma_address = pci_map_single( hwdev, sg->address,
+ sg->length, direction );
+ sg->dma_length = sg->length;
+ return 1;
+ }
+
+ if ( direction == PCI_DMA_NONE )
+ BUG();
+
+ tbl = get_tce_table(hwdev);
+
+ if ( tbl ) {
+ /* Compute the number of tces required */
+ numTces = num_tces_sg( sg, nents );
+ /* Create the tces and get the dma address */
+ dma_handle = create_tces_sg( tbl, sg, nents, numTces, direction );
+
+ /* Fill in the dma scatterlist */
+ num_dma = fill_scatterlist_sg( sg, nents, dma_handle, numTces );
+ }
+
+ return num_dma;
+}
+
+/* Unmap a scatterlist mapped by the coalescing pci_map_sg above: free the
+ * contiguous TCE range spanning the first dma address through the last
+ * entry with a nonzero dma_length.  (Dead code: inside #if 0.)
+ * NOTE(review): if no entry has a nonzero dma_length, dma_end_page is
+ * used uninitialized below -- presumably callers never pass such a list;
+ * confirm before resurrecting this code.
+ */
+void pci_unmap_sg( struct pci_dev *hwdev, struct scatterlist *sg, int nelms, int direction )
+{
+ struct TceTable * tbl;
+ unsigned order, numTces, i;
+ dma_addr_t dma_end_page, dma_start_page;
+
+ PPCDBG(PPCDBG_TCE, "pci_unmap_sg:\n");
+ PPCDBG(PPCDBG_TCE, "\thwdev = 0x%16.16lx, sg = 0x%16.16lx, direction = 0x%16.16lx, nelms = 0x%16.16lx\n", hwdev, sg, direction, nelms);
+
+ if ( direction == PCI_DMA_NONE )
+ BUG();
+
+ dma_start_page = sg->dma_address & PAGE_MASK;
+ /* Scan backwards for the last entry actually used for dma. */
+ for ( i=nelms; i>0; --i ) {
+ unsigned k = i - 1;
+ if ( sg[k].dma_length ) {
+ dma_end_page = ( sg[k].dma_address +
+ sg[k].dma_length - 1 ) & PAGE_MASK;
+ break;
+ }
+ }
+
+ numTces = ((dma_end_page - dma_start_page ) >> PAGE_SHIFT) + 1;
+ order = get_order( numTces << PAGE_SHIFT );
+
+ if ( order > 10 )
+ PPCDBG(PPCDBG_TCE, "pci_unmap_sg: order=%d, numTces=%d, nelms=%d, dma_start_page=%016lx, dma_end_page=%016lx\n",
+ order, numTces, nelms, (unsigned long)dma_start_page, (unsigned long)dma_end_page );
+
+ tbl = get_tce_table(hwdev);
+
+ if ( tbl )
+ ppc_md.tce_free( tbl, dma_start_page, order, numTces );
+
+}
+#else
+/* Simple (non-coalescing) scatterlist map: one pci_map_single per entry.
+ * Always produces exactly nelems dma segments.
+ */
+int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
+ int direction)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++) {
+ /* void *, not unsigned long: page_address() yields a pointer
+ * and pci_map_single() takes a void *; the original forced a
+ * pointer->integer->pointer round trip (compiler warnings).
+ */
+ void *vaddr = page_address(sglist->page) + sglist->offset;
+
+ sglist->dma_address = pci_map_single(pdev, vaddr,
+ sglist->length, direction);
+ sglist->dma_length = sglist->length;
+ sglist++;
+ }
+
+ return nelems;
+}
+
+/* Undo pci_map_sg(): unmap each scatterlist entry individually. */
+void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
+ int direction)
+{
+ for (; nelems > 0; --nelems, ++sglist)
+ pci_unmap_single(pdev, sglist->dma_address,
+ sglist->dma_length, direction);
+}
+#endif
+
+/*
+ * phb_tce_table_init
+ *
+ * Function: Display TCE config registers. Could be easily changed
+ * to initialize the hardware to use TCEs.
+ * Purely diagnostic: reads and traces PHB/chip registers for the PHB
+ * whose OF node appears in of_tce_table[]; always returns 0.
+ */
+unsigned long phb_tce_table_init(struct pci_controller *phb) {
+ unsigned int r, cfg_rw, i;
+ unsigned long r64;
+ phandle node;
+
+ PPCDBG(PPCDBG_TCE, "phb_tce_table_init: start.\n");
+
+ node = ((struct device_node *)(phb->arch_data))->node;
+
+ PPCDBG(PPCDBG_TCEINIT, "\tphb = 0x%lx\n", phb);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb->type = 0x%lx\n", phb->type);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb->phb_regs = 0x%lx\n", phb->phb_regs);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb->chip_regs = 0x%lx\n", phb->chip_regs);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb: node = 0x%lx\n", node);
+ PPCDBG(PPCDBG_TCEINIT, "\tphb->arch_data = 0x%lx\n", phb->arch_data);
+
+ /* Find this PHB in the NULL-terminated of_tce_table[]. */
+ i = 0;
+ while(of_tce_table[i].node) {
+ if(of_tce_table[i].node == node) {
+ if(phb->type == phb_type_python) {
+ /* Python PHB: dump TCE Address Register + config. */
+ r = *(((unsigned int *)phb->phb_regs) + (0xf10>>2));
+ PPCDBG(PPCDBG_TCEINIT, "\tTAR(low) = 0x%x\n", r);
+ r = *(((unsigned int *)phb->phb_regs) + (0xf00>>2));
+ PPCDBG(PPCDBG_TCEINIT, "\tTAR(high) = 0x%x\n", r);
+ r = *(((unsigned int *)phb->phb_regs) + (0xfd0>>2));
+ PPCDBG(PPCDBG_TCEINIT, "\tPHB cfg(rw) = 0x%x\n", r);
+ break;
+ } else if(phb->type == phb_type_speedwagon) {
+ /* Speedwagon: dump NCFG and the four TAR regs. */
+ r64 = *(((unsigned long *)phb->chip_regs) +
+ (0x800>>3));
+ PPCDBG(PPCDBG_TCEINIT, "\tNCFG = 0x%lx\n", r64);
+ r64 = *(((unsigned long *)phb->chip_regs) +
+ (0x580>>3));
+ PPCDBG(PPCDBG_TCEINIT, "\tTAR0 = 0x%lx\n", r64);
+ r64 = *(((unsigned long *)phb->chip_regs) +
+ (0x588>>3));
+ PPCDBG(PPCDBG_TCEINIT, "\tTAR1 = 0x%lx\n", r64);
+ r64 = *(((unsigned long *)phb->chip_regs) +
+ (0x590>>3));
+ PPCDBG(PPCDBG_TCEINIT, "\tTAR2 = 0x%lx\n", r64);
+ r64 = *(((unsigned long *)phb->chip_regs) +
+ (0x598>>3));
+ PPCDBG(PPCDBG_TCEINIT, "\tTAR3 = 0x%lx\n", r64);
+ cfg_rw = *(((unsigned int *)phb->chip_regs) +
+ ((0x160 +
+ (((phb->local_number)+8)<<12))>>2));
+ PPCDBG(PPCDBG_TCEINIT, "\tcfg_rw = 0x%x\n", cfg_rw);
+ }
+ }
+ i++;
+ }
+
+ PPCDBG(PPCDBG_TCEINIT, "phb_tce_table_init: done\n");
+
+ return(0);
+}
+
+/* These are called very early. */
+/* Install the pSeries TCE build/free hooks into the machdep vector. */
+void tce_init_pSeries(void)
+{
+ ppc_md.tce_build = tce_build_pSeries;
+ ppc_md.tce_free = tce_free_pSeries;
+}
+
+/* Install the iSeries TCE build/free hooks into the machdep vector.
+ * Called very early in boot, like tce_init_pSeries above.
+ */
+void tce_init_iSeries(void)
+{
+ ppc_md.tce_build = tce_build_iSeries;
+ ppc_md.tce_free = tce_free_iSeries;
+}
--- /dev/null
+/*
+ * pci_dn.c
+ *
+ * Copyright (C) 2001 Todd Inglett, IBM Corporation
+ *
+ * PCI manipulation via device_nodes.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/init.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppcdebug.h>
+#include <asm/Naca.h>
+#include <asm/pci_dma.h>
+
+#include "pci.h"
+
+/* Traverse_func that inits the PCI fields of the device node.
+ * NOTE: this *must* be done before read/write config to the device.
+ * Derives busno/devfn either from "bus-range" (PHB special case) or from
+ * the first word of "reg" (format 00BBSS00).  Always returns NULL so the
+ * traversal continues.
+ */
+static void * __init
+update_dn_pci_info(struct device_node *dn, void *data)
+{
+ struct pci_controller *phb = (struct pci_controller *)data;
+ u32 *regs;
+ char *device_type = get_property(dn, "device_type", 0);
+
+ dn->phb = phb;
+ if (device_type && strcmp(device_type, "pci") == 0 && get_property(dn, "class-code", 0) == 0) {
+ /* special case for PHB's. Sigh. */
+ regs = (u32 *)get_property(dn, "bus-range", 0);
+ /* Guard: firmware may omit bus-range; the original
+ * dereferenced regs[0] unchecked.
+ */
+ if (regs)
+ dn->busno = regs[0];
+ dn->devfn = 0; /* assumption */
+ } else {
+ regs = (u32 *)get_property(dn, "reg", 0);
+ if (regs) {
+ /* First register entry is addr (00BBSS00) */
+ dn->busno = (regs[0] >> 16) & 0xff;
+ dn->devfn = (regs[0] >> 8) & 0xff;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Hit all the BARs of all the devices with values from OF.
+ * This is unnecessary on most systems, but also harmless.
+ * Skips PHBs and nodes without recorded addresses; on iSeries the BAR
+ * writes are compiled out entirely.  Always returns NULL (traversal
+ * continues).
+ */
+static void * __init
+write_OF_bars(struct device_node *dn, void *data)
+{
+ int i;
+ u32 oldbar, newbar, newbartest;
+ u8 config_offset;
+ char *name = get_property(dn, "name", 0);
+ char *device_type = get_property(dn, "device_type", 0);
+ char devname[128];
+ /* snprintf: name/device_type come from the device tree and could
+ * otherwise overflow the fixed-size buffer (original used sprintf).
+ */
+ snprintf(devname, sizeof(devname), "%04x:%02x.%x %s (%s)", dn->busno, PCI_SLOT(dn->devfn), PCI_FUNC(dn->devfn), name ? name : "<no name>", device_type ? device_type : "<unknown type>");
+
+ if (device_type && strcmp(device_type, "pci") == 0 &&
+ get_property(dn, "class-code", 0) == 0)
+ return NULL; /* This is probably a phb. Skip it. */
+
+ if (dn->n_addrs == 0)
+ return NULL; /* This is normal for some adapters or bridges */
+
+ if (dn->addrs == NULL) {
+ /* This shouldn't happen. */
+ printk(KERN_WARNING "write_OF_bars %s: device has %d BARs, but no addrs recorded\n", devname, dn->n_addrs);
+ return NULL;
+ }
+
+#ifndef CONFIG_PPC_ISERIES
+ for (i = 0; i < dn->n_addrs; i++) {
+ newbar = dn->addrs[i].address;
+ /* Low byte of the OF "space" word is the config offset. */
+ config_offset = dn->addrs[i].space & 0xff;
+ if (ppc_md.pcibios_read_config_dword(dn, config_offset, &oldbar) != PCIBIOS_SUCCESSFUL) {
+ printk(KERN_WARNING "write_OF_bars %s: read BAR%d failed\n", devname, i);
+ continue;
+ }
+ /* Need to update this BAR. */
+ if (ppc_md.pcibios_write_config_dword(dn, config_offset, newbar) != PCIBIOS_SUCCESSFUL) {
+ printk(KERN_WARNING "write_OF_bars %s: write BAR%d with 0x%08x failed (old was 0x%08x)\n", devname, i, newbar, oldbar);
+ continue;
+ }
+ /* sanity check */
+ if (ppc_md.pcibios_read_config_dword(dn, config_offset, &newbartest) != PCIBIOS_SUCCESSFUL) {
+ printk(KERN_WARNING "write_OF_bars %s: sanity test read BAR%d failed?\n", devname, i);
+ continue;
+ }
+ if ((newbar & PCI_BASE_ADDRESS_MEM_MASK) != (newbartest & PCI_BASE_ADDRESS_MEM_MASK)) {
+ printk(KERN_WARNING "write_OF_bars %s: oops...BAR%d read back as 0x%08x%s!\n", devname, i, newbartest, (oldbar & PCI_BASE_ADDRESS_MEM_MASK) == (newbartest & PCI_BASE_ADDRESS_MEM_MASK) ? " (original value)" : "");
+ continue;
+ }
+ }
+#endif
+ return NULL;
+}
+
+#if 0
+/* Traverse_func that starts the BIST (self test) */
+/* Currently only detects and logs BIST capability; the actual self-test
+ * trigger was never written.  (Dead code: inside #if 0.)
+ */
+static void * __init
+startBIST(struct device_node *dn, void *data)
+{
+ struct pci_controller *phb = (struct pci_controller *)data;
+ u8 bist;
+
+ char *name = get_property(dn, "name", 0);
+ udbg_printf("startBIST: %s phb=%p, device=%p\n", name ? name : "<unknown>", phb, dn);
+
+ if (ppc_md.pcibios_read_config_byte(dn, PCI_BIST, &bist) == PCIBIOS_SUCCESSFUL) {
+ if (bist & PCI_BIST_CAPABLE) {
+ udbg_printf(" -> is BIST capable!\n", phb, dn);
+ /* Start bist here */
+ }
+ }
+ return NULL;
+}
+
+
+/******************************************************************
+ * Traverse a device tree stopping each PCI device in the tree.
+ * This is done depth first. As each node is processed, a "pre"
+ * function is called, the children are processed recursively, and
+ * then a "post" function is called.
+ *
+ * The "pre" and "post" funcs return a value. If non-zero
+ * is returned from the "pre" func, the traversal stops and this
+ * value is returned. The return value from "post" is not used.
+ * This return value is useful when using traverse as
+ * a method of finding a device.
+ *
+ * NOTE: we do not run the funcs for devices that do not appear to
+ * be PCI except for the start node which we assume (this is good
+ * because the start node is often a phb which may be missing PCI
+ * properties).
+ * We use the class-code as an indicator. If we run into
+ * one of these nodes we also assume its siblings are non-pci for
+ * performance.
+ *
+ ******************************************************************/
+void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data)
+{
+ struct device_node *dn, *nextdn;
+ void *ret;
+
+ /* "pre" on the start node itself; non-NULL short-circuits. */
+ if (pre && (ret = pre(start, data)) != NULL)
+ return ret;
+ for (dn = start->child; dn; dn = nextdn) {
+ nextdn = NULL;
+ if (get_property(dn, "class-code", 0)) {
+ if (pre && (ret = pre(dn, data)) != NULL)
+ return ret;
+ if (dn->child) {
+ /* Depth first...do children */
+ nextdn = dn->child;
+ } else if (dn->sibling) {
+ /* ok, try next sibling instead. */
+ nextdn = dn->sibling;
+ } else {
+ /* no more children or siblings...call "post" */
+ /* NOTE(review): "post" only fires on leaf nodes
+ * with no siblings -- confirm that is intended.
+ */
+ if (post)
+ post(dn, data);
+ }
+ }
+ if (!nextdn) {
+ /* Walk up to next valid sibling. */
+ do {
+ dn = dn->parent;
+ if (dn == start)
+ return NULL;
+ } while (dn->sibling == NULL);
+ nextdn = dn->sibling;
+ }
+ }
+ return NULL;
+}
+
+/* Run "pre" over every PCI device under every PHB in the system.
+ * Each phb is handed to the traversal as its "data" argument.
+ * Stops at, and returns, the first non-NULL value "pre" produces.
+ */
+void *traverse_all_pci_devices(traverse_func pre)
+{
+ struct pci_controller *hose;
+ void *found;
+
+ for (hose = hose_head; hose != NULL; hose = hose->next) {
+ found = traverse_pci_devices((struct device_node *)hose->arch_data, pre, NULL, hose);
+ if (found)
+ return found;
+ }
+ return NULL;
+}
+
+
+/* Traversal callback matching a node against a packed <busno,devfn>
+ * search key (busno in bits 15..8, devfn in bits 7..0).
+ * Returns the node (ending the traversal) on a match, else NULL.
+ */
+static void *
+is_devfn_node(struct device_node *dn, void *data)
+{
+ unsigned long key = (unsigned long)data;
+ int want_bus = (key >> 8) & 0xff;
+ int want_devfn = key & 0xff;
+
+ if (dn->busno == want_bus && dn->devfn == want_devfn)
+ return dn;
+ return NULL;
+}
+
+/* Like is_devfn_node, but only the device part of devfn is compared:
+ * the low three "function" bits are masked off on both sides.
+ */
+static void *
+is_devfn_sub_node(struct device_node *dn, void *data)
+{
+ unsigned long key = (unsigned long)data;
+ int want_bus = (key >> 8) & 0xff;
+ int want_dev = key & 0xf8;
+
+ if (dn->busno == want_bus && (dn->devfn & 0xf8) == want_dev)
+ return dn;
+ return NULL;
+}
+
+/* Given an existing EADs (pci bridge) device node create a fake one
+ * that will simulate function zero. Make it a sibling of other_eads.
+ * Returns the new node, or NULL if the allocation fails.
+ */
+static struct device_node *
+create_eads_node(struct device_node *other_eads)
+{
+ struct device_node *eads = (struct device_node *)kmalloc(sizeof(struct device_node), GFP_KERNEL);
+
+ if (!eads) return NULL; /* huh? */
+ *eads = *other_eads;
+ eads->devfn &= ~7; /* make it function zero */
+ eads->tce_table = NULL;
+ /* NOTE: share properties. We could copy but for now this should suffice.
+ * The full_name is also incorrect...but seems harmless.
+ */
+ eads->child = NULL;
+ eads->next = NULL;
+ /* The structure copy above left other_eads' allnext/sibling values
+ * in the clone, so the two stores below splice the clone into both
+ * lists immediately after other_eads without losing the chains. */
+ other_eads->allnext = eads;
+ other_eads->sibling = eads;
+ return eads;
+}
+
+/* This is the "slow" path for looking up a device_node from a
+ * pci_dev. It will hunt for the device under its parent's
+ * phb and then update sysdata for a future fastpath.
+ *
+ * It may also do fixups on the actual device since this happens
+ * on the first read/write.
+ *
+ * Note that it also must deal with devices that don't exist.
+ * In this case it may probe for real hardware ("just in case")
+ * and add a device_node to the device tree if necessary.
+ *
+ */
+struct device_node *fetch_dev_dn(struct pci_dev *dev)
+{
+ struct device_node *orig_dn = (struct device_node *)dev->sysdata;
+ struct pci_controller *phb = orig_dn->phb; /* assume same phb as orig_dn */
+ struct device_node *phb_dn;
+ struct device_node *dn;
+ /* Pack <bus,devfn> the way the is_devfn_* matchers expect it. */
+ unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
+
+ phb_dn = (struct device_node *)(phb->arch_data);
+ dn = (struct device_node *)traverse_pci_devices(phb_dn, is_devfn_node, NULL, (void *)searchval);
+ if (dn) {
+ dev->sysdata = dn;
+ /* ToDo: call some device init hook here */
+ } else {
+ /* Now it is very possible that we can't find the device because it is
+ * not the zero'th device of a multifunction device and we don't have
+ * permission to read the zero'th device. If this is the case, Linux
+ * would ordinarily skip all the other functions.
+ */
+ if ((searchval & 0x7) == 0) {
+ struct device_node *thisdevdn;
+ /* Ok, we are looking for fn == 0. Let's check for other functions. */
+ thisdevdn = (struct device_node *)traverse_pci_devices(phb_dn, is_devfn_sub_node, NULL, (void *)searchval);
+ if (thisdevdn) {
+ /* Ah ha! There does exist a sub function. Now this isn't an exact
+ * match for searchval, but in order to get Linux to believe the sub
+ * functions exist we will need to manufacture a fake device_node
+ * for this zero'th function. To keep this simple for now we only
+ * handle pci bridges and we just hand back the found node which
+ * isn't correct, but Linux won't care.
+ */
+ char *device_type = (char *)get_property(thisdevdn, "device_type", 0);
+ if (device_type && strcmp(device_type, "pci") == 0) {
+ return create_eads_node(thisdevdn);
+ }
+ }
+ }
+ /* ToDo: device not found...probe for it anyway with a fake dn?
+ struct device_node fake_dn;
+ memset(&fake_dn, 0, sizeof(fake_dn));
+ fake_dn.phb = phb;
+ fake_dn.busno = dev->bus->number;
+ fake_dn.devfn = dev->devfn;
+ ... now do ppc_md.pcibios_read_config_dword(&fake_dn.....)
+ ... if ok, alloc a real device_node and dn = real_dn;
+ */
+ }
+ return dn;
+}
+
+
+/******************************************************************
+ * Actually initialize the phbs.
+ * The buswalk on this phb has not happened yet.
+ ******************************************************************/
+void __init
+pci_devs_phb_init(void)
+{
+ /* This must be done first so the device nodes have valid pci info! */
+ traverse_all_pci_devices(update_dn_pci_info);
+
+ /* Hack for regatta which does not init the bars correctly */
+ traverse_all_pci_devices(write_OF_bars);
+#if 0
+ /* Disabled BIST experiment: start self-test on every device,
+ * wait, then collect results. */
+ traverse_all_pci_devices(startBIST);
+ mdelay(5000);
+ traverse_all_pci_devices(checkBIST);
+#endif
+}
+
+
+static void __init
+pci_fixup_bus_sysdata_list(struct list_head *bus_list)
+{
+ struct list_head *ln;
+ struct pci_bus *bus;
+ struct pci_controller *phb;
+ int newnum;
+
+ /* Visit every bus on this list, then recurse into its children. */
+ for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
+ bus = pci_bus_b(ln);
+ /* A PHB's root bus has bus->self == NULL and already carries
+ * correct sysdata, so it is left untouched. */
+ if (bus->self) {
+ bus->sysdata = bus->self->sysdata;
+ /* Also fixup the bus number on large bus systems to
+ * include the PHB# in the next byte
+ */
+ phb = PCI_GET_DN(bus)->phb;
+ if (phb && phb->buid) {
+ /* NOTE(review): this pushes bus->number past 0xff;
+ * every consumer must use the same encoding. */
+ newnum = (phb->global_number << 8) | bus->number;
+ bus->number = newnum;
+ sprintf(bus->name, "PCI Bus #%x", bus->number);
+ }
+ }
+ pci_fixup_bus_sysdata_list(&bus->children);
+ }
+}
+
+
+/******************************************************************
+ * Fixup the bus->sysdata ptrs to point to the bus' device_node.
+ * This is done late in pcibios_init(). We do this mostly for
+ * sanity, but pci_dma.c uses these at DMA time so they must be
+ * correct.
+ * To do this we recurse down the bus hierarchy. Note that PHB's
+ * have bus->self == NULL, but fortunately bus->sysdata is already
+ * correct in this case.
+ ******************************************************************/
+void __init
+pci_fix_bus_sysdata(void)
+{
+ /* Recursively fix bus->sysdata starting at the root buses; see the
+ * block comment above for why PHB buses need no fixing. */
+ pci_fixup_bus_sysdata_list(&pci_root_buses);
+}
--- /dev/null
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Miscellaneous procedures for dealing with the PowerMac hardware.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/nvram.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <asm/init.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/nvram.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+
+#undef DEBUG
+
+/*
+ * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
+ */
+static int nvram_naddrs;
+static volatile unsigned char *nvram_addr;
+static volatile unsigned char *nvram_data;
+static int nvram_mult, is_core_99;
+static char* nvram_image;
+static int core99_bank = 0;
+sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
+
+#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */
+
+#define CORE99_SIGNATURE 0x5a
+#define CORE99_ADLER_START 0x14
+
+/* Core99 nvram is a flash */
+#define CORE99_FLASH_STATUS_DONE 0x80
+#define CORE99_FLASH_STATUS_ERR 0x38
+#define CORE99_FLASH_CMD_ERASE_CONFIRM 0xd0
+#define CORE99_FLASH_CMD_ERASE_SETUP 0x20
+#define CORE99_FLASH_CMD_RESET 0xff
+#define CORE99_FLASH_CMD_WRITE_SETUP 0x40
+
+/* CHRP NVRAM header */
+struct chrp_header {
+ u8 signature; /* partition type/signature byte */
+ u8 cksum; /* covers signature, len and name (see chrp_checksum) */
+ u16 len;
+ char name[12]; /* partition name */
+ u8 data[0]; /* payload starts here */
+};
+
+/* Core99 flash-bank header: CHRP header plus an adler checksum and a
+ * generation count used to pick the newer of the two banks at boot. */
+struct core99_header {
+ struct chrp_header hdr;
+ u32 adler; /* see core99_calc_adler() */
+ u32 generation; /* bumped on every update; larger value wins */
+ u32 reserved[2];
+};
+
+/* Byte offsets of known nvram partitions (e.g. XPRAM).
+ * NOTE(review): never written in this file -- presumably filled in
+ * elsewhere; callers treat a negative value as "absent". */
+static int nvram_partitions[3];
+
+/* Compute the CHRP partition-header checksum: the signature byte plus
+ * every byte from "len" up to (not including) "data", folded down to
+ * 8 bits with end-around carry.
+ */
+static u8
+chrp_checksum(struct chrp_header* hdr)
+{
+ u16 sum;
+ u8 *cur = (u8 *)&hdr->len;
+ u8 *end = hdr->data;
+
+ sum = hdr->signature;
+ while (cur < end)
+ sum += *cur++;
+ while (sum > 0xFF)
+ sum = (sum & 0xFF) + (sum >> 8);
+ return sum;
+}
+
+/* Adler-32-style checksum over the nvram image, starting at
+ * CORE99_ADLER_START; used by core99_check() and pmac_nvram_update(). */
+static u32
+core99_calc_adler(u8 *buffer)
+{
+ int cnt;
+ u32 low, high;
+
+ buffer += CORE99_ADLER_START;
+ low = 1;
+ high = 0;
+ for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
+ /* Periodic reduction keeps the running sums within 32 bits.
+ * NOTE(review): the duplicated "high %=" below looks like a
+ * typo for "low %= 65521UL" (proper Adler-32 reduces both).
+ * Left untouched: generation and verification both use this
+ * routine, so "fixing" it would invalidate stored checksums. */
+ if ((cnt % 5000) == 0) {
+ high %= 65521UL;
+ high %= 65521UL;
+ }
+ low += buffer[cnt];
+ high += low;
+ }
+ low %= 65521UL;
+ high %= 65521UL;
+
+ return (high << 16) | low;
+}
+
+/* Validate one flash bank's header. Returns the bank's generation
+ * number, or 0 when the signature, header checksum or adler checksum
+ * is wrong -- so an invalid bank loses the newest-bank comparison. */
+static u32
+core99_check(u8* datas)
+{
+ struct core99_header* hdr99 = (struct core99_header*)datas;
+
+ if (hdr99->hdr.signature != CORE99_SIGNATURE) {
+#ifdef DEBUG
+ printk("Invalid signature\n");
+#endif
+ return 0;
+ }
+ if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
+#ifdef DEBUG
+ printk("Invalid checksum\n");
+#endif
+ return 0;
+ }
+ if (hdr99->adler != core99_calc_adler(datas)) {
+#ifdef DEBUG
+ printk("Invalid adler\n");
+#endif
+ return 0;
+ }
+ return hdr99->generation;
+}
+
+/* Erase one Core99 NVRAM flash bank and verify it reads back all-0xff.
+ *
+ * bank: bank number (0 or 1) selecting the NVRAM_SIZE-sized window.
+ * Returns 0 on success, -ENXIO on a flash error or failed verify.
+ */
+static int
+core99_erase_bank(int bank)
+{
+ int stat, i;
+
+ /* Fix: honour the caller's bank argument. The original used the
+ * global core99_bank, which only happened to match because the
+ * existing caller passes core99_bank. */
+ u8* base = (u8 *)nvram_data + bank*NVRAM_SIZE;
+
+ out_8(base, CORE99_FLASH_CMD_ERASE_SETUP);
+ out_8(base, CORE99_FLASH_CMD_ERASE_CONFIRM);
+ /* Busy-wait until the flash reports the erase is finished. */
+ do { stat = in_8(base); }
+ while(!(stat & CORE99_FLASH_STATUS_DONE));
+ out_8(base, CORE99_FLASH_CMD_RESET);
+ if (stat & CORE99_FLASH_STATUS_ERR) {
+ printk("nvram: flash error 0x%02x on erase !\n", stat);
+ return -ENXIO;
+ }
+ /* Verify the erase actually took. */
+ for (i=0; i<NVRAM_SIZE; i++)
+ if (base[i] != 0xff) {
+ printk("nvram: flash erase failed !\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/* Program one Core99 NVRAM flash bank with datas and verify it.
+ *
+ * bank: bank number (0 or 1) selecting the NVRAM_SIZE-sized window.
+ * datas: NVRAM_SIZE bytes to write.
+ * Returns 0 on success, -ENXIO on a flash error or failed verify.
+ */
+static int
+core99_write_bank(int bank, u8* datas)
+{
+ int i, stat = 0;
+
+ /* Fix: honour the caller's bank argument. The original used the
+ * global core99_bank, which only happened to match because the
+ * existing caller passes core99_bank. */
+ u8* base = (u8 *)nvram_data + bank*NVRAM_SIZE;
+
+ for (i=0; i<NVRAM_SIZE; i++) {
+ /* Byte-program cycle: setup command, then the data byte,
+ * then busy-wait on the status register. */
+ out_8(base+i, CORE99_FLASH_CMD_WRITE_SETUP);
+ out_8(base+i, datas[i]);
+ do { stat = in_8(base); }
+ while(!(stat & CORE99_FLASH_STATUS_DONE));
+ if (stat & CORE99_FLASH_STATUS_ERR)
+ break;
+ }
+ out_8(base, CORE99_FLASH_CMD_RESET);
+ if (stat & CORE99_FLASH_STATUS_ERR) {
+ printk("nvram: flash error 0x%02x on write !\n", stat);
+ return -ENXIO;
+ }
+ /* Read back and verify the whole bank. */
+ for (i=0; i<NVRAM_SIZE; i++)
+ if (base[i] != datas[i]) {
+ printk("nvram: flash write failed !\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+
+__init
+void pmac_nvram_init(void)
+{
+ struct device_node *dp;
+
+ nvram_naddrs = 0;
+
+ dp = find_devices("nvram");
+ if (dp == NULL) {
+ printk(KERN_ERR "Can't find NVRAM device\n");
+ return;
+ }
+ nvram_naddrs = dp->n_addrs;
+ /* "nvram,flash" compatible means a Core99 dual-bank flash part. */
+ is_core_99 = device_is_compatible(dp, "nvram,flash");
+ if (is_core_99) {
+ int i;
+ u32 gen_bank0, gen_bank1;
+
+ if (nvram_naddrs < 1) {
+ printk(KERN_ERR "nvram: no address\n");
+ return;
+ }
+ /* Keep a RAM copy of the active bank; byte accesses use the
+ * image and pmac_nvram_update() flushes it back to flash. */
+ nvram_image = kmalloc(NVRAM_SIZE, GFP_KERNEL);
+ if (!nvram_image) {
+ printk(KERN_ERR "nvram: can't allocate image\n");
+ return;
+ }
+ nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
+#ifdef DEBUG
+ printk("nvram: Checking bank 0...\n");
+#endif
+ /* Two banks live back to back; the one with the larger valid
+ * generation count is the current one. */
+ gen_bank0 = core99_check((u8 *)nvram_data);
+ gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
+ core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
+#ifdef DEBUG
+ printk("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
+ printk("nvram: Active bank is: %d\n", core99_bank);
+#endif
+ for (i=0; i<NVRAM_SIZE; i++)
+ nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
+ } else if (_machine == _MACH_pSeries && nvram_naddrs == 1) {
+ /* pSeries: direct-mapped, byte-for-byte (stride of 1). */
+ nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+ nvram_mult = 1;
+ } else if (nvram_naddrs == 1) {
+ /* Direct-mapped with a possible stride (nvram_mult). */
+ nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+ nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
+ } else if (nvram_naddrs == 2) {
+ /* Indirect scheme: one address-register window, one data window. */
+ nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+ nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
+ } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
+ /* No MMIO at all: go through the PMU. naddrs == -1 is the
+ * sentinel tested in nvram_read_byte/nvram_write_byte. */
+ nvram_naddrs = -1;
+ } else {
+ printk(KERN_ERR "Don't know how to access NVRAM with %d addresses\n",
+ nvram_naddrs);
+ }
+}
+
+/* Flush the RAM image of a Core99 nvram back to flash, if anything
+ * changed: bump the generation, refresh both checksums, then erase
+ * and program the OTHER bank, so a failed write leaves the previous
+ * bank intact and still valid. */
+void
+pmac_nvram_update(void)
+{
+ struct core99_header* hdr99;
+
+ if (!is_core_99 || !nvram_data || !nvram_image)
+ return;
+ /* Nothing to do if the image still matches the active bank. */
+ if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
+ NVRAM_SIZE))
+ return;
+#ifdef DEBUG
+ printk("Updating nvram...\n");
+#endif
+ hdr99 = (struct core99_header*)nvram_image;
+ hdr99->generation++;
+ hdr99->hdr.signature = CORE99_SIGNATURE;
+ hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
+ hdr99->adler = core99_calc_adler(nvram_image);
+ /* Flip to the other bank before erasing/writing. */
+ core99_bank = core99_bank ? 0 : 1;
+ if (core99_erase_bank(core99_bank)) {
+ printk("nvram: Error erasing bank %d\n", core99_bank);
+ return;
+ }
+ if (core99_write_bank(core99_bank, nvram_image))
+ printk("nvram: Error writing bank %d\n", core99_bank);
+}
+
+/* Read one nvram byte at offset addr, dispatching on the access
+ * method discovered by pmac_nvram_init() (encoded in nvram_naddrs).
+ * Returns 0 for unknown access methods or a failed PMU request. */
+__openfirmware
+unsigned char nvram_read_byte(int addr)
+{
+ #ifdef CONFIG_ADB_PMU // -aglitke
+ struct adb_request req;
+ #endif
+
+ switch (nvram_naddrs) {
+#ifdef CONFIG_ADB_PMU
+ case -1:
+ /* nvram lives behind the PMU: issue a read request and
+ * poll until it completes. */
+ if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM,
+ (addr >> 8) & 0xff, addr & 0xff))
+ break;
+ while (!req.complete)
+ pmu_poll();
+ return req.reply[1];
+#endif
+ case 1:
+ /* Core99 reads come from the RAM image, not the flash.
+ * NOTE(review): no bounds mask on addr here, unlike the
+ * direct-mapped path below. */
+ if (is_core_99)
+ return nvram_image[addr];
+ return nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult];
+ case 2:
+ /* Indirect scheme: latch the high address bits, then read
+ * from the data window. */
+ *nvram_addr = addr >> 5;
+ eieio();
+ return nvram_data[(addr & 0x1f) << 4];
+ }
+ return 0;
+}
+
+/* Write one nvram byte (val) at offset addr; same dispatch as
+ * nvram_read_byte(). Core99 writes only touch the RAM image --
+ * pmac_nvram_update() pushes them to flash later. */
+__openfirmware
+void nvram_write_byte(unsigned char val, int addr)
+{
+ #ifdef CONFIG_ADB_PMU // -aglitke
+ struct adb_request req;
+ #endif
+ switch (nvram_naddrs) {
+#ifdef CONFIG_ADB_PMU
+ case -1:
+ /* Write through the PMU and poll for completion. */
+ if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM,
+ (addr >> 8) & 0xff, addr & 0xff, val))
+ break;
+ while (!req.complete)
+ pmu_poll();
+ break;
+#endif
+ case 1:
+ if (is_core_99) {
+ nvram_image[addr] = val;
+ break;
+ }
+ nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult] = val;
+ break;
+ case 2:
+ /* Indirect scheme: latch high address bits, then write
+ * into the data window. */
+ *nvram_addr = addr >> 5;
+ eieio();
+ nvram_data[(addr & 0x1f) << 4] = val;
+ break;
+ }
+ eieio();
+}
+
+/* Return the nvram byte offset of the given partition index; callers
+ * (e.g. pmac_xpram_read/write) treat a negative result as "absent". */
+int
+pmac_get_partition(int partition)
+{
+ return nvram_partitions[partition];
+}
+
+/* Read one byte from the XPRAM partition; returns 0 when the
+ * partition offset is unavailable (negative).
+ */
+u8
+pmac_xpram_read(int xpaddr)
+{
+ int base = nvram_partitions[pmac_nvram_XPRAM];
+
+ return (base < 0) ? 0 : nvram_read_byte(xpaddr + base);
+}
+
+/* Write one byte into the XPRAM partition; silently ignored when the
+ * partition offset is unavailable (negative).
+ */
+void
+pmac_xpram_write(int xpaddr, u8 data)
+{
+ int offset = nvram_partitions[pmac_nvram_XPRAM];
+
+ if (offset < 0)
+ return;
+
+ /* Fix: nvram_write_byte() takes (value, address); the original call
+ * had the arguments reversed, storing the low byte of the address
+ * at nvram offset "data". */
+ nvram_write_byte(data, xpaddr + offset);
+}
+
+
--- /dev/null
+/*
+ * pmc.c
+ * Copyright (C) 2001 Dave Engebretsen & Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Change Activity:
+ * 2001/06/05 : engebret : Created.
+ * End Change Activity
+ */
+
+#include <asm/proc_fs.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/processor.h>
+
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <asm/pmc.h>
+#include <asm/uaccess.h>
+#include <asm/Naca.h>
+
+extern struct Naca *naca;
+
+struct _pmc_sw pmc_sw_system = {
+ 0
+};
+
+struct _pmc_sw pmc_sw_cpu[NR_CPUS] = {
+ {0 },
+};
+
+/*
+ * Provide enough storage for either system level counters or
+ * one cpu's counters.
+ */
+struct _pmc_sw_text pmc_sw_text;
+struct _pmc_hw_text pmc_hw_text;
+
+/* Format the software STAB (segment table) counters into the shared
+ * pmc_sw_text buffer and return it.
+ *
+ * file == -1: totals summed across all online cpus.
+ * otherwise: that cpu's counters plus its per-entry usage counts.
+ */
+char *
+ppc64_pmc_stab(int file)
+{
+ int n;
+ unsigned long stab_faults, stab_capacity_castouts, stab_invalidations;
+ unsigned long i;
+
+ stab_faults = stab_capacity_castouts = stab_invalidations = n = 0;
+
+ if (file == -1) {
+ for (i = 0; i < smp_num_cpus; i++) {
+ stab_faults += pmc_sw_cpu[i].stab_faults;
+ stab_capacity_castouts += pmc_sw_cpu[i].stab_capacity_castouts;
+ stab_invalidations += pmc_sw_cpu[i].stab_invalidations;
+ }
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Faults 0x%lx\n", stab_faults);
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Castouts 0x%lx\n", stab_capacity_castouts);
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Invalidations 0x%lx\n", stab_invalidations);
+ } else {
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Faults 0x%lx\n",
+ pmc_sw_cpu[file].stab_faults);
+
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Castouts 0x%lx\n",
+ pmc_sw_cpu[file].stab_capacity_castouts);
+
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Invalidations 0x%lx\n",
+ pmc_sw_cpu[file].stab_invalidations);
+
+ /* Per-entry usage counts, printed only for entries that
+ * were ever used. */
+ for (i = 0; i < STAB_ENTRY_MAX; i++) {
+ if (pmc_sw_cpu[file].stab_entry_use[i]) {
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Entry %02ld 0x%lx\n", i,
+ pmc_sw_cpu[file].stab_entry_use[i]);
+ }
+ }
+
+ }
+
+ return(pmc_sw_text.buffer);
+}
+
+/* Format the HTAB (hash page table) counters into pmc_sw_text.
+ *
+ * file == -1 selects the system-wide counters; any other value is a
+ * per-cpu request, for which no HTAB counters are maintained (N/A).
+ * Returns the shared pmc_sw_text buffer.
+ *
+ * Cleanup: the original declared and zeroed three locals
+ * (htab_primary_overflows, htab_capacity_castouts,
+ * htab_read_to_write_faults) that were never read -- removed.
+ */
+char *
+ppc64_pmc_htab(int file)
+{
+ int n = 0;
+
+ if (file == -1) {
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Primary Overflows 0x%lx\n",
+ pmc_sw_system.htab_primary_overflows);
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Castouts 0x%lx\n",
+ pmc_sw_system.htab_capacity_castouts);
+ } else {
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Primary Overflows N/A\n");
+
+ n += sprintf(pmc_sw_text.buffer + n,
+ "Castouts N/A\n\n");
+
+ }
+
+ return(pmc_sw_text.buffer);
+}
+
+/* Format the hardware performance-monitor SPRs (MMCR0/1, PMC1-8) of
+ * the current cpu into pmc_hw_text and return it. A system-wide
+ * view (file == -1) is not implemented. */
+char *
+ppc64_pmc_hw(int file)
+{
+ int n;
+
+ n = 0;
+ if (file == -1) {
+ n += sprintf(pmc_hw_text.buffer + n, "Not Implemented\n");
+ } else {
+ n += sprintf(pmc_hw_text.buffer + n,
+ "MMCR0 0x%lx\n", mfspr(MMCR0));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "MMCR1 0x%lx\n", mfspr(MMCR1));
+#if 0
+ n += sprintf(pmc_hw_text.buffer + n,
+ "MMCRA 0x%lx\n", mfspr(MMCRA));
+#endif
+
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC1 0x%lx\n", mfspr(PMC1));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC2 0x%lx\n", mfspr(PMC2));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC3 0x%lx\n", mfspr(PMC3));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC4 0x%lx\n", mfspr(PMC4));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC5 0x%lx\n", mfspr(PMC5));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC6 0x%lx\n", mfspr(PMC6));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC7 0x%lx\n", mfspr(PMC7));
+ n += sprintf(pmc_hw_text.buffer + n,
+ "PMC8 0x%lx\n", mfspr(PMC8));
+ }
+
+ return(pmc_hw_text.buffer);
+}
--- /dev/null
+/*
+ * ppc-stub.c: KGDB support for the Linux kernel.
+ *
+ * adapted from arch/sparc/kernel/sparc-stub.c for the PowerPC
+ * some stuff borrowed from Paul Mackerras' xmon
+ * Copyright (C) 1998 Michael AK Tesch (tesch@cs.wisc.edu)
+ *
+ * Modifications to run under Linux
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * This file originally came from the gdb sources, and the
+ * copyright notices have been retained below.
+ */
+
+/****************************************************************************
+
+ THIS SOFTWARE IS NOT COPYRIGHTED
+
+ HP offers the following for use in the public domain. HP makes no
+ warranty with regard to the software or its performance and the
+ user accepts the software "AS IS" with all faults.
+
+ HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
+ TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+****************************************************************************/
+
+/****************************************************************************
+ * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
+ *
+ * Module name: remcom.c $
+ * Revision: 1.34 $
+ * Date: 91/03/09 12:29:49 $
+ * Contributor: Lake Stevens Instrument Division$
+ *
+ * Description: low level support for gdb debugger. $
+ *
+ * Considerations: only works on target hardware $
+ *
+ * Written by: Glenn Engel $
+ * ModuleState: Experimental $
+ *
+ * NOTES: See Below $
+ *
+ * Modified for SPARC by Stu Grossman, Cygnus Support.
+ *
+ * This code has been extensively tested on the Fujitsu SPARClite demo board.
+ *
+ * To enable debugger support, two things need to happen. One, a
+ * call to set_debug_traps() is necessary in order to allow any breakpoints
+ * or error conditions to be properly intercepted and reported to gdb.
+ * Two, a breakpoint needs to be generated to begin communication. This
+ * is most easily accomplished by a call to breakpoint(). Breakpoint()
+ * simulates a breakpoint by executing a trap #1.
+ *
+ *************
+ *
+ * The following gdb commands are supported:
+ *
+ * command function Return value
+ *
+ * g return the value of the CPU registers hex data or ENN
+ * G set the value of the CPU registers OK or ENN
+ * qOffsets Get section offsets. Reply is Text=xxx;Data=yyy;Bss=zzz
+ *
+ * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
+ * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
+ *
+ * c Resume at current address SNN ( signal NN)
+ * cAA..AA Continue at address AA..AA SNN
+ *
+ * s Step one instruction SNN
+ * sAA..AA Step one instruction from AA..AA SNN
+ *
+ * k kill
+ *
+ * ? What was the last sigval ? SNN (signal NN)
+ *
+ * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
+ * baud rate
+ *
+ * All commands and responses are sent with a packet which includes a
+ * checksum. A packet consists of
+ *
+ * $<packet info>#<checksum>.
+ *
+ * where
+ * <packet info> :: <characters representing the command or response>
+ * <checksum> :: <two hex digits computed as modulo 256 sum of <packetinfo>>
+ *
+ * When a packet is received, it is first acknowledged with either '+' or '-'.
+ * '+' indicates a successful transfer. '-' indicates a failed transfer.
+ *
+ * Example:
+ *
+ * Host: Reply:
+ * $m0,10#2a +$00010203040506070809101112131415#42
+ *
+ ****************************************************************************/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/system.h>
+#include <asm/signal.h>
+#include <asm/kgdb.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+
+void breakinst(void);
+
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound buffers
+ * at least NUMREGBYTES*2 are needed for register packets
+ */
+#define BUFMAX 2048
+static char remcomInBuffer[BUFMAX];
+static char remcomOutBuffer[BUFMAX];
+
+static int initialized = 0;
+static int kgdb_active = 0;
+static int kgdb_started = 0;
+static u_int fault_jmp_buf[100];
+static int kdebug;
+
+static const char hexchars[]="0123456789abcdef";
+
+/* Place where we save old trap entries for restoration - sparc*/
+/* struct tt_entry kgdb_savettable[256]; */
+/* typedef void (*trapfunc_t)(void); */
+
+#if 0
+/* Install an exception handler for kgdb */
+static void exceptionHandler(int tnum, unsigned int *tfunc)
+{
+ /* We are dorking with a live trap table, all irqs off */
+}
+#endif
+
+/* Save lr, r1, r2, cr and r13-r31 into buf so kgdb_longjmp() can
+ * unwind back here. Returns 0 on the initial call; the longjmp path
+ * re-returns here with longjmp's (non-zero) val in r3.
+ * NOTE(review): uses 32-bit stw/stmw at 4-byte offsets, preserving
+ * only the low halves of the registers -- apparently inherited from
+ * the 32-bit stub and suspect in a ppc64 port; confirm. */
+int
+kgdb_setjmp(long *buf)
+{
+ asm ("mflr 0; stw 0,0(%0);"
+ "stw 1,4(%0); stw 2,8(%0);"
+ "mfcr 0; stw 0,12(%0);"
+ "stmw 13,16(%0)"
+ : : "r" (buf));
+ /* XXX should save fp regs as well */
+ return 0;
+}
+/* Restore the state saved by kgdb_setjmp() and resume there, making
+ * the original setjmp call appear to return val (forced to 1 when 0
+ * is passed so the second return is always distinguishable).
+ * NOTE(review): 32-bit lwz/lmw restores, mirroring kgdb_setjmp(). */
+void
+kgdb_longjmp(long *buf, int val)
+{
+ if (val == 0)
+ val = 1;
+ asm ("lmw 13,16(%0);"
+ "lwz 0,12(%0); mtcrf 0x38,0;"
+ "lwz 0,0(%0); lwz 1,4(%0); lwz 2,8(%0);"
+ "mtlr 0; mr 3,%1"
+ : : "r" (buf), "r" (val));
+}
+/* Convert an ASCII hex digit to its numeric value, or -1 when ch is
+ * not a hex digit. */
+static int
+hex(unsigned char ch)
+{
+ if (ch >= '0' && ch <= '9')
+ return ch - '0';
+ if (ch >= 'a' && ch <= 'f')
+ return ch - 'a' + 10;
+ if (ch >= 'A' && ch <= 'F')
+ return ch - 'A' + 10;
+ return -1;
+}
+
+/* Convert the memory pointed to by mem into hex, placing result in buf.
+ * Return a pointer to the last char put in buf (null), in case of mem fault,
+ * return 0.
+ */
+static unsigned char *
+mem2hex(char *mem, char *buf, int count)
+{
+ unsigned char ch;
+
+ /* Guard the reads: a fault inside the loop longjmps back through
+ * kgdb_fault_handler, leaving buf only partially filled. */
+ if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
+ debugger_fault_handler = kgdb_fault_handler;
+ while (count-- > 0) {
+ ch = *mem++;
+ *buf++ = hexchars[ch >> 4];
+ *buf++ = hexchars[ch & 0xf];
+ }
+ } else {
+ /* error condition */
+ }
+ debugger_fault_handler = 0;
+ /* NOTE(review): contrary to the header comment, a fault does not
+ * return 0 -- the (possibly partial) buf pointer is returned. */
+ *buf = 0;
+ return buf;
+}
+
+/* Convert the hex array pointed to by buf into binary, storing it at
+ * mem; returns a pointer to the byte AFTER the last one written.
+ * Memory faults during the copy are caught via the kgdb fault
+ * handler and abort the transfer.
+ */
+static char *
+hex2mem(char *buf, char *mem, int count)
+{
+ int i;
+ unsigned char ch;
+ char *start = mem; /* remember where the data goes for the flush */
+
+ if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
+ debugger_fault_handler = kgdb_fault_handler;
+ for (i=0; i<count; i++) {
+ ch = hex(*buf++) << 4;
+ ch |= hex(*buf++);
+ *mem++ = ch;
+ }
+ /* Fix: flush the range actually written. The original flushed
+ * [mem, mem+count) with mem already advanced past the data, and
+ * truncated the 64-bit pointer through an int cast. */
+ flush_icache_range((unsigned long)start, (unsigned long)start + count);
+ } else {
+ /* error condition */
+ }
+ debugger_fault_handler = 0;
+ return mem;
+}
+
+/*
+ * While we find nice hex chars, build an int.
+ * Return number of chars processed.
+ */
+static int
+hexToInt(char **ptr, int *intValue)
+{
+ int numChars = 0;
+ int hexValue;
+
+ *intValue = 0;
+
+ /* Parse under the same fault guard used by mem2hex/hex2mem, since
+ * *ptr comes from debugger-supplied packet data. */
+ if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
+ debugger_fault_handler = kgdb_fault_handler;
+ while (**ptr) {
+ hexValue = hex(**ptr);
+ /* Stop at the first non-hex character; *ptr is left
+ * pointing at it for the caller. */
+ if (hexValue < 0)
+ break;
+
+ *intValue = (*intValue << 4) | hexValue;
+ numChars ++;
+
+ (*ptr)++;
+ }
+ } else {
+ /* error condition */
+ }
+ debugger_fault_handler = 0;
+
+ return (numChars);
+}
+
+/* scan for the sequence $<data>#<checksum> */
+static void
+getpacket(char *buffer)
+{
+ unsigned char checksum;
+ unsigned char xmitcsum;
+ int i;
+ int count;
+ unsigned char ch;
+
+ /* Keep reading packets until one arrives with a valid checksum;
+ * each attempt is acked with '+' (good) or '-' (bad). The 0x7f
+ * masking below strips the parity/high bit off every byte. */
+ do {
+ /* wait around for the start character, ignore all other
+ * characters */
+ while ((ch = (getDebugChar() & 0x7f)) != '$') ;
+
+ checksum = 0;
+ xmitcsum = -1;
+
+ count = 0;
+
+ /* now, read until a # or end of buffer is found */
+ while (count < BUFMAX) {
+ ch = getDebugChar() & 0x7f;
+ if (ch == '#')
+ break;
+ checksum = checksum + ch;
+ buffer[count] = ch;
+ count = count + 1;
+ }
+
+ /* Oversized packet: discard it and wait for the next '$'. */
+ if (count >= BUFMAX)
+ continue;
+
+ buffer[count] = 0;
+
+ if (ch == '#') {
+ /* Two hex digits follow '#': the transmitted checksum. */
+ xmitcsum = hex(getDebugChar() & 0x7f) << 4;
+ xmitcsum |= hex(getDebugChar() & 0x7f);
+ if (checksum != xmitcsum)
+ putDebugChar('-'); /* failed checksum */
+ else {
+ putDebugChar('+'); /* successful transfer */
+ /* if a sequence char is present, reply the ID */
+ if (buffer[2] == ':') {
+ putDebugChar(buffer[0]);
+ putDebugChar(buffer[1]);
+ /* remove sequence chars from buffer */
+ count = strlen(buffer);
+ for (i=3; i <= count; i++)
+ buffer[i-3] = buffer[i];
+ }
+ }
+ }
+ } while (checksum != xmitcsum);
+}
+
+/* Transmit buffer as a gdb remote packet -- $<data>#<checksum> --
+ * retrying the whole packet until the far end acknowledges with '+'.
+ */
+static void putpacket(unsigned char *buffer)
+{
+ unsigned char csum;
+ unsigned char c;
+ int i;
+
+ do {
+ putDebugChar('$');
+ csum = 0;
+ i = 0;
+ while ((c = buffer[i]) != 0) {
+ putDebugChar(c);
+ csum += c;
+ i++;
+ }
+ putDebugChar('#');
+ putDebugChar(hexchars[csum >> 4]);
+ putDebugChar(hexchars[csum & 0xf]);
+ } while ((getDebugChar() & 0x7f) != '+');
+}
+
+/* Invalidate the instruction cache (e.g. after patching code). */
+static void kgdb_flush_cache_all(void)
+{
+ flush_instruction_cache();
+}
+
+
+/* Set up exception handlers for tracing and breakpoints
+ * [could be called kgdb_init()]
+ */
+void set_debug_traps(void)
+{
+#if 0
+ unsigned char c;
+
+ save_and_cli(flags);
+
+ /* In case GDB is started before us, ack any packets (presumably
+ * "$?#xx") sitting there.
+ *
+ * I've found this code causes more problems than it solves,
+ * so that's why it's commented out. GDB seems to work fine
+ * now starting either before or after the kernel -bwb
+ */
+
+ while((c = getDebugChar()) != '$');
+ while((c = getDebugChar()) != '#');
+ c = getDebugChar(); /* eat first csum byte */
+ c = getDebugChar(); /* eat second csum byte */
+ putDebugChar('+'); /* ack it */
+#endif
+ /* Point the kernel's debugger hooks at the kgdb entry points. */
+ debugger = kgdb;
+ debugger_bpt = kgdb_bpt;
+ debugger_sstep = kgdb_sstep;
+ debugger_iabr_match = kgdb_iabr_match;
+ debugger_dabr_match = kgdb_dabr_match;
+
+ initialized = 1;
+}
+
+/* Installed as debugger_fault_handler while kgdb touches memory;
+ * aborts the faulting access by longjmp'ing back to the guard. */
+static void kgdb_fault_handler(struct pt_regs *regs)
+{
+ kgdb_longjmp((long*)fault_jmp_buf, 1);
+}
+
+/* Breakpoint trap hook: enter the gdb protocol loop. */
+int kgdb_bpt(struct pt_regs *regs)
+{
+ handle_exception(regs);
+ return 1;
+}
+
+/* Single-step trap hook: enter the gdb protocol loop. */
+int kgdb_sstep(struct pt_regs *regs)
+{
+ handle_exception(regs);
+ return 1;
+}
+
+/* Generic debugger entry point (installed as "debugger"). */
+void kgdb(struct pt_regs *regs)
+{
+ handle_exception(regs);
+}
+
+/* Instruction address breakpoint hook -- not really supported, but
+ * still drops into the protocol loop after warning. */
+int kgdb_iabr_match(struct pt_regs *regs)
+{
+ printk("kgdb doesn't support iabr, what?!?\n");
+ handle_exception(regs);
+ return 1;
+}
+
+/* Data address breakpoint hook -- not really supported, but still
+ * drops into the protocol loop after warning. */
+int kgdb_dabr_match(struct pt_regs *regs)
+{
+ printk("kgdb doesn't support dabr, what?!?\n");
+ handle_exception(regs);
+ return 1;
+}
+
+/* Convert a PowerPC hardware trap type code to a unix signal number. */
+/*
+ * This table contains the mapping between PowerPC hardware trap types, and
+ * signals, which are primarily what GDB understands.
+ */
+static struct hard_trap_info
+{
+ unsigned int tt; /* Trap type code for powerpc */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ { 0x200, SIGSEGV }, /* machine check */
+ { 0x300, SIGSEGV }, /* address error (store) */
+ { 0x400, SIGBUS }, /* instruction bus error */
+ { 0x500, SIGINT }, /* interrupt */
+ { 0x600, SIGBUS }, /* alignment */
+ { 0x700, SIGTRAP }, /* breakpoint trap */
+ { 0x800, SIGFPE }, /* fpu unavail */
+ { 0x900, SIGALRM }, /* decrementer */
+ { 0xa00, SIGILL }, /* reserved */
+ { 0xb00, SIGILL }, /* reserved */
+ { 0xc00, SIGCHLD }, /* syscall */
+ { 0xd00, SIGTRAP }, /* single-step/watch */
+ { 0xe00, SIGFPE }, /* fp assist */
+ { 0, 0} /* Must be last */
+};
+
+/* Map a hardware trap type to the signal number gdb should see;
+ * unknown trap types map to SIGHUP.
+ */
+static int computeSignal(unsigned int tt)
+{
+ struct hard_trap_info *entry;
+
+ for (entry = hard_trap_info; entry->tt && entry->signo; entry++) {
+ if (entry->tt == tt)
+ return entry->signo;
+ }
+ return SIGHUP; /* default for things we don't know about */
+}
+
+#define PC_REGNUM 64
+#define SP_REGNUM 1
+
+/*
+ * This function does all command processing for interfacing to gdb.
+ * One remote-protocol packet is handled per loop iteration; control
+ * returns to the kernel on a continue ('c'/'k') or step ('s') command.
+ *
+ * Fix: several "&regs" references had been mangled into the "(R)"
+ * character (the "&reg" prefix mis-decoded as an HTML entity); the
+ * intended address-of expressions are restored below.
+ */
+static void
+handle_exception (struct pt_regs *regs)
+{
+ int sigval;
+ int addr;
+ int length;
+ char *ptr;
+ unsigned long msr;
+
+ /* NOTE(review): addr/length are plain ints; the (char *)addr casts
+ * below would truncate a 64-bit address sent by gdb -- confirm
+ * against hexToInt()'s int * interface. */
+ if (debugger_fault_handler) {
+ debugger_fault_handler(regs);
+ panic("kgdb longjump failed!\n");
+ }
+ if (kgdb_active) {
+ printk("interrupt while in kgdb, returning\n");
+ return;
+ }
+ kgdb_active = 1;
+ kgdb_started = 1;
+
+#ifdef KGDB_DEBUG
+ printk("kgdb: entering handle_exception; trap [0x%x]\n",
+ (unsigned int)regs->trap);
+#endif
+
+ kgdb_interruptible(0);
+ lock_kernel();
+ msr = get_msr();
+ set_msr(msr & ~MSR_EE); /* disable interrupts */
+
+ if (regs->nip == (unsigned long)breakinst) {
+ /* Skip over breakpoint trap insn */
+ regs->nip += 4;
+ }
+
+ /* reply to host that an exception has occurred */
+ sigval = computeSignal(regs->trap);
+ ptr = remcomOutBuffer;
+
+#if 0
+ *ptr++ = 'S';
+ *ptr++ = hexchars[sigval >> 4];
+ *ptr++ = hexchars[sigval & 0xf];
+#else
+ /* 'T' stop reply: signal number plus the PC and SP register values */
+ *ptr++ = 'T';
+ *ptr++ = hexchars[sigval >> 4];
+ *ptr++ = hexchars[sigval & 0xf];
+ *ptr++ = hexchars[PC_REGNUM >> 4];
+ *ptr++ = hexchars[PC_REGNUM & 0xf];
+ *ptr++ = ':';
+ ptr = mem2hex((char *)&regs->nip, ptr, 4);
+ *ptr++ = ';';
+ *ptr++ = hexchars[SP_REGNUM >> 4];
+ *ptr++ = hexchars[SP_REGNUM & 0xf];
+ *ptr++ = ':';
+ /* NOTE(review): '&regs' takes the address of the local pointer
+ * variable; GPR1 should live at (char *)regs + SP_REGNUM*4 --
+ * confirm against the pt_regs layout. */
+ ptr = mem2hex(((char *)&regs) + SP_REGNUM*4, ptr, 4);
+ *ptr++ = ';';
+#endif
+
+ *ptr++ = 0;
+
+ putpacket(remcomOutBuffer);
+
+ /* XXX We may want to add some features dealing with poking the
+ * XXX page tables, ... (look at sparc-stub.c for more info)
+ * XXX also required hacking to the gdb sources directly...
+ */
+
+ while (1) {
+ remcomOutBuffer[0] = 0;
+
+ getpacket(remcomInBuffer);
+ switch (remcomInBuffer[0]) {
+ case '?': /* report most recent signal */
+ remcomOutBuffer[0] = 'S';
+ remcomOutBuffer[1] = hexchars[sigval >> 4];
+ remcomOutBuffer[2] = hexchars[sigval & 0xf];
+ remcomOutBuffer[3] = 0;
+ break;
+#if 0
+ case 'q': /* this screws up gdb for some reason...*/
+ {
+ extern long _start, sdata, __bss_start;
+
+ ptr = &remcomInBuffer[1];
+ if (strncmp(ptr, "Offsets", 7) != 0)
+ break;
+
+ ptr = remcomOutBuffer;
+ sprintf(ptr, "Text=%8.8x;Data=%8.8x;Bss=%8.8x",
+ &_start, &sdata, &__bss_start);
+ break;
+ }
+#endif
+ case 'd':
+ /* toggle debug flag */
+ kdebug ^= 1;
+ break;
+
+ case 'g': /* return the value of the CPU registers.
+ * some of them are non-PowerPC names :(
+ * they are stored in gdb like:
+ * struct {
+ * u32 gpr[32];
+ * f64 fpr[32];
+ * u32 pc, ps, cnd, lr; (ps=msr)
+ * u32 cnt, xer, mq;
+ * }
+ */
+ {
+ int i;
+ ptr = remcomOutBuffer;
+ /* General Purpose Regs */
+ ptr = mem2hex((char *)regs, ptr, 32 * 4);
+ /* Floating Point Regs - FIXME */
+ /*ptr = mem2hex((char *), ptr, 32 * 8);*/
+ for(i=0; i<(32*8*2); i++) { /* 2chars/byte */
+ ptr[i] = '0';
+ }
+ ptr += 32*8*2;
+ /* pc, msr, cr, lr, ctr, xer, (mq is unused) */
+ ptr = mem2hex((char *)&regs->nip, ptr, 4);
+ ptr = mem2hex((char *)&regs->msr, ptr, 4);
+ ptr = mem2hex((char *)&regs->ccr, ptr, 4);
+ ptr = mem2hex((char *)&regs->link, ptr, 4);
+ ptr = mem2hex((char *)&regs->ctr, ptr, 4);
+ ptr = mem2hex((char *)&regs->xer, ptr, 4);
+ }
+ break;
+
+ case 'G': /* set the value of the CPU registers */
+ {
+ ptr = &remcomInBuffer[1];
+
+ /*
+ * If the stack pointer has moved, you should pray.
+ * (cause only god can help you).
+ */
+
+ /* General Purpose Regs */
+ hex2mem(ptr, (char *)regs, 32 * 4);
+
+ /* NOTE(review): ptr is never advanced past the 32*4*2 GPR
+ * hex chars before the FP skip below -- verify the offsets
+ * against the 'g' packet layout. */
+ /* Floating Point Regs - FIXME?? */
+ /*ptr = hex2mem(ptr, ??, 32 * 8);*/
+ ptr += 32*8*2;
+
+ /* pc, msr, cr, lr, ctr, xer, (mq is unused) */
+ ptr = hex2mem(ptr, (char *)&regs->nip, 4);
+ ptr = hex2mem(ptr, (char *)&regs->msr, 4);
+ ptr = hex2mem(ptr, (char *)&regs->ccr, 4);
+ ptr = hex2mem(ptr, (char *)&regs->link, 4);
+ ptr = hex2mem(ptr, (char *)&regs->ctr, 4);
+ ptr = hex2mem(ptr, (char *)&regs->xer, 4);
+
+ strcpy(remcomOutBuffer,"OK");
+ }
+ break;
+ case 'H':
+ /* don't do anything, yet, just acknowledge */
+ hexToInt(&ptr, &addr);
+ strcpy(remcomOutBuffer,"OK");
+ break;
+
+ case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
+ /* Try to read %x,%x. */
+
+ ptr = &remcomInBuffer[1];
+
+ if (hexToInt(&ptr, &addr)
+ && *ptr++ == ','
+ && hexToInt(&ptr, &length)) {
+ if (mem2hex((char *)addr, remcomOutBuffer,length))
+ break;
+ strcpy (remcomOutBuffer, "E03");
+ } else {
+ strcpy(remcomOutBuffer,"E01");
+ }
+ break;
+
+ case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
+ /* Try to read '%x,%x:'. */
+
+ ptr = &remcomInBuffer[1];
+
+ if (hexToInt(&ptr, &addr)
+ && *ptr++ == ','
+ && hexToInt(&ptr, &length)
+ && *ptr++ == ':') {
+ if (hex2mem(ptr, (char *)addr, length)) {
+ strcpy(remcomOutBuffer, "OK");
+ } else {
+ strcpy(remcomOutBuffer, "E03");
+ }
+ flush_icache_range(addr, addr+length);
+ } else {
+ strcpy(remcomOutBuffer, "E02");
+ }
+ break;
+
+
+ case 'k': /* kill the program, actually just continue */
+ case 'c': /* cAA..AA Continue; address AA..AA optional */
+ /* try to read optional parameter, pc unchanged if no parm */
+
+ ptr = &remcomInBuffer[1];
+ if (hexToInt(&ptr, &addr)) {
+ regs->nip = addr;
+ }
+
+/* Need to flush the instruction cache here, as we may have deposited a
+ * breakpoint, and the icache probably has no way of knowing that a data ref to
+ * some location may have changed something that is in the instruction cache.
+ */
+ kgdb_flush_cache_all();
+ set_msr(msr);
+ kgdb_interruptible(1);
+ unlock_kernel();
+ kgdb_active = 0;
+ return;
+
+ case 's':
+ kgdb_flush_cache_all();
+ regs->msr |= MSR_SE;
+#if 0
+ set_msr(msr | MSR_SE);
+#endif
+ unlock_kernel();
+ kgdb_active = 0;
+ return;
+
+ case 'r': /* Reset (if user process..exit ???)*/
+ panic("kgdb reset.");
+ break;
+ } /* switch */
+ if (remcomOutBuffer[0] && kdebug) {
+ printk("remcomInBuffer: %s\n", remcomInBuffer);
+ printk("remcomOutBuffer: %s\n", remcomOutBuffer);
+ }
+ /* reply to the request */
+ putpacket(remcomOutBuffer);
+ } /* while(1) */
+}
+
+/* This function will generate a breakpoint exception. It is used at the
+ beginning of a program to sync up with a debugger and can be used
+ otherwise as a quick means to stop program execution and "break" into
+ the debugger. */
+
+void
+breakpoint(void)
+{
+ if (!initialized) {
+ printk("breakpoint() called b4 kgdb init\n");
+ return;
+ }
+
+ /* Use adjacent string-literal concatenation with explicit newlines:
+ * a single string literal spanning multiple source lines is not
+ * valid C and is rejected by gcc 3.x and later.
+ */
+ asm(" .globl breakinst\n"
+ "breakinst: .long 0x7d821008\n");
+}
+
+/* Emit 's' as a GDB 'O' (console output) packet, provided gdb has
+ already attached (kgdb_started).  Returns 1 when the packet was
+ sent, 0 when the caller must handle the output itself. */
+int
+kgdb_output_string (const char* s, unsigned int count)
+{
+ char pkt[512];
+ const unsigned int max = sizeof(pkt) / 2 - 2;
+
+ if (!kgdb_started)
+ return 0;
+
+ /* Each payload byte expands to two hex digits; keep room for the
+ * leading 'O' and the terminating NUL. */
+ if (count > max)
+ count = max;
+
+ pkt[0] = 'O';
+ mem2hex (s, pkt + 1, count);
+ putpacket(pkt);
+
+ return 1;
+}
--- /dev/null
+/*
+ * arch/ppc/kernel/ppc_asm.h
+ *
+ * Definitions used by various bits of low-level assembly code on PowerPC.
+ *
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+
+#include "ppc_asm.tmpl"
+#include "ppc_defs.h"
+
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
+#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
+#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
+#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base)
+#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+/* Set cr0 according to whether the lppaca records any pending
+ * (lazily-disabled) interrupt.  ra receives the pending-interrupt
+ * flags; rb is a scratch register clobbered with the Paca address
+ * read from SPRG3. */
+#define CHECKANYINT(ra,rb) \
+ mfspr rb,SPRG3; /* Get Paca address */\
+ ld ra,PACALPPACA+LPPACAANYINT(rb); /* Get pending interrupt flags */\
+ cmpldi 0,ra,0;
+
+/* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_LOW or 1,1,1
+#define HMT_MEDIUM or 2,2,2
+#define HMT_HIGH or 3,3,3
+
+/* Insert the high 32 bits of the MSR into what will be the new
+ MSR (via SRR1 and rfid) This preserves the MSR.SF and MSR.ISF
+ bits. */
+
+#define FIX_SRR1(ra, rb) \
+ mr rb,ra; \
+ mfmsr ra; \
+ rldimi ra,rb,0,32
+
+#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
+
+/*
+ * LOADADDR( rn, name )
+ * loads the address of 'name' into 'rn'
+ *
+ * LOADBASE( rn, name )
+ * loads the address (less the low 16 bits) of 'name' into 'rn'
+ * suitable for base+disp addressing
+ */
+#define LOADADDR(rn,name) \
+ lis rn,name##@highest; \
+ ori rn,rn,name##@higher; \
+ rldicr rn,rn,32,31; \
+ oris rn,rn,name##@h; \
+ ori rn,rn,name##@l
+
+#define LOADBASE(rn,name) \
+ lis rn,name@highest; \
+ ori rn,rn,name@higher; \
+ rldicr rn,rn,32,31; \
+ oris rn,rn,name@ha
+
+
+#define SET_REG_TO_CONST(reg, value) \
+ lis reg,(((value)>>48)&0xFFFF); \
+ ori reg,reg,(((value)>>32)&0xFFFF); \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(((value)>>16)&0xFFFF); \
+ ori reg,reg,((value)&0xFFFF);
+
+#define SET_REG_TO_LABEL(reg, label) \
+ lis reg,(label)@highest; \
+ ori reg,reg,(label)@higher; \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(label)@h; \
+ ori reg,reg,(label)@l;
+
+
+/* PPPBBB - DRENG If KERNELBASE is always 0xC0...,
+ * Then we can easily do this with one asm insn. -Peter
+ */
+#define tophys(rd,rs) \
+ lis rd,((KERNELBASE>>48)&0xFFFF); \
+ rldicr rd,rd,32,31; \
+ sub rd,rs,rd
+
+#define tovirt(rd,rs) \
+ lis rd,((KERNELBASE>>48)&0xFFFF); \
+ rldicr rd,rd,32,31; \
+ add rd,rs,rd
+
--- /dev/null
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/* General Purpose Registers (GPRs) */
+
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
--- /dev/null
+/*
+ * WARNING! This file is automatically generated - DO NOT EDIT!
+ */
--- /dev/null
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/nvram.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include <asm/page.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#if 0
+#include <linux/ide.h>
+#include <asm/ide.h>
+#endif
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/checksum.h>
+#include <asm/pgtable.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/pci-bridge.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/abs_addr.h>
+#ifdef CONFIG_SMP
+#include <asm/smplock.h>
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_ISERIES
+#include <asm/iSeries/iSeries_pci.h>
+#include <asm/iSeries/iSeries_proc.h>
+#endif
+
+/* Tell string.h we don't want memcpy etc. as cpp defines */
+#define EXPORT_SYMTAB_STROPS
+
+extern void do_IRQ(struct pt_regs *regs, int isfake);
+extern void SystemResetException(struct pt_regs *regs);
+extern void MachineCheckException(struct pt_regs *regs);
+extern void AlignmentException(struct pt_regs *regs);
+extern void ProgramCheckException(struct pt_regs *regs);
+extern void SingleStepException(struct pt_regs *regs);
+extern int sys_sigreturn(struct pt_regs *regs);
+extern int do_signal(sigset_t *, struct pt_regs *);
+extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
+extern int unregister_ioctl32_conversion(unsigned int cmd);
+
+long long __ashrdi3(long long, int);
+long long __ashldi3(long long, int);
+long long __lshrdi3(long long, int);
+int abs(int);
+
+extern struct pci_dev * iSeries_veth_dev;
+extern struct pci_dev * iSeries_vio_dev;
+
+EXPORT_SYMBOL(do_signal);
+EXPORT_SYMBOL(do_IRQ);
+EXPORT_SYMBOL(SystemResetException);
+EXPORT_SYMBOL(MachineCheckException);
+EXPORT_SYMBOL(AlignmentException);
+EXPORT_SYMBOL(ProgramCheckException);
+EXPORT_SYMBOL(SingleStepException);
+EXPORT_SYMBOL(sys_sigreturn);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(kernel_flag);
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(smp_num_cpus);
+#endif /* CONFIG_SMP */
+
+EXPORT_SYMBOL(register_ioctl32_conversion);
+EXPORT_SYMBOL(unregister_ioctl32_conversion);
+
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(isa_mem_base);
+EXPORT_SYMBOL(pci_io_base);
+EXPORT_SYMBOL(pci_dram_offset);
+
+EXPORT_SYMBOL(find_next_zero_bit);
+
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strtok);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(naca);
+EXPORT_SYMBOL(__down);
+
+/* EXPORT_SYMBOL(csum_partial); already in net/netsyms.c */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_tcpudp_magic);
+
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+
+/*
+EXPORT_SYMBOL(inb);
+EXPORT_SYMBOL(inw);
+EXPORT_SYMBOL(inl);
+EXPORT_SYMBOL(outb);
+EXPORT_SYMBOL(outw);
+EXPORT_SYMBOL(outl);
+EXPORT_SYMBOL(outsl);*/
+
+#ifdef CONFIG_MSCHUNKS
+EXPORT_SYMBOL(msChunks);
+#endif
+EXPORT_SYMBOL(reloc_offset);
+
+#ifdef CONFIG_PPC_ISERIES
+EXPORT_SYMBOL(iSeries_proc_callback);
+EXPORT_SYMBOL(HvCall0);
+EXPORT_SYMBOL(HvCall1);
+EXPORT_SYMBOL(HvCall2);
+EXPORT_SYMBOL(HvCall3);
+EXPORT_SYMBOL(HvCall4);
+EXPORT_SYMBOL(HvCall5);
+EXPORT_SYMBOL(HvCall6);
+EXPORT_SYMBOL(HvCall7);
+#endif
+
+EXPORT_SYMBOL(_insb);
+EXPORT_SYMBOL(_outsb);
+EXPORT_SYMBOL(_insw);
+EXPORT_SYMBOL(_outsw);
+EXPORT_SYMBOL(_insl);
+EXPORT_SYMBOL(_outsl);
+EXPORT_SYMBOL(_insw_ns);
+EXPORT_SYMBOL(_outsw_ns);
+EXPORT_SYMBOL(_insl_ns);
+EXPORT_SYMBOL(_outsl_ns);
+EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_map_single);
+EXPORT_SYMBOL(pci_unmap_single);
+EXPORT_SYMBOL(pci_map_sg);
+EXPORT_SYMBOL(pci_unmap_sg);
+#ifdef CONFIG_PPC_ISERIES
+EXPORT_SYMBOL(iSeries_Write_Long);
+EXPORT_SYMBOL(iSeries_GetLocationData);
+EXPORT_SYMBOL(iSeries_Read_Long);
+EXPORT_SYMBOL(iSeries_Device_ToggleReset);
+EXPORT_SYMBOL(iSeries_Write_Word);
+EXPORT_SYMBOL(iSeries_memcpy_fromio);
+EXPORT_SYMBOL(iSeries_Read_Word);
+EXPORT_SYMBOL(iSeries_Read_Byte);
+EXPORT_SYMBOL(iSeries_Write_Byte);
+
+#endif /* CONFIG_PPC_ISERIES */
+#ifdef CONFIG_PPC_EEH
+EXPORT_SYMBOL(eeh_check_failure);
+EXPORT_SYMBOL(eeh_total_mmio_ffs);
+EXPORT_SYMBOL(eeh_total_mmio_reads);
+#endif /* CONFIG_PPC_EEH */
+#endif /* CONFIG_PCI */
+
+EXPORT_SYMBOL(iSeries_veth_dev);
+EXPORT_SYMBOL(iSeries_vio_dev);
+
+EXPORT_SYMBOL(start_thread);
+EXPORT_SYMBOL(kernel_thread);
+
+EXPORT_SYMBOL(flush_instruction_cache);
+EXPORT_SYMBOL(_get_PVR);
+EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(enable_kernel_fp);
+EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(flush_icache_user_range);
+EXPORT_SYMBOL(flush_icache_page);
+EXPORT_SYMBOL(flush_dcache_page);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__global_cli);
+EXPORT_SYMBOL(__global_sti);
+EXPORT_SYMBOL(__global_save_flags);
+EXPORT_SYMBOL(__global_restore_flags);
+#ifdef CONFIG_PPC_ISERIES
+EXPORT_SYMBOL(__no_use_restore_flags);
+EXPORT_SYMBOL(__no_use_save_flags);
+EXPORT_SYMBOL(__no_use_sti);
+EXPORT_SYMBOL(__no_use_cli);
+#endif
+#endif
+
+#ifndef CONFIG_MACH_SPECIFIC
+EXPORT_SYMBOL(_machine);
+#endif
+EXPORT_SYMBOL(ppc_md);
+
+EXPORT_SYMBOL(find_devices);
+EXPORT_SYMBOL(find_type_devices);
+EXPORT_SYMBOL(find_compatible_devices);
+EXPORT_SYMBOL(find_path_device);
+EXPORT_SYMBOL(device_is_compatible);
+EXPORT_SYMBOL(machine_is_compatible);
+EXPORT_SYMBOL(find_all_nodes);
+EXPORT_SYMBOL(get_property);
+
+#ifndef CONFIG_PPC_ISERIES
+EXPORT_SYMBOL_NOVERS(sys_ctrler); /* tibit */
+#endif
+#ifdef CONFIG_NVRAM
+EXPORT_SYMBOL(nvram_read_byte);
+EXPORT_SYMBOL(nvram_write_byte);
+#endif /* CONFIG_NVRAM */
+
+EXPORT_SYMBOL_NOVERS(__ashrdi3);
+EXPORT_SYMBOL_NOVERS(__ashldi3);
+EXPORT_SYMBOL_NOVERS(__lshrdi3);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL_NOVERS(memset);
+EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(memscan);
+EXPORT_SYMBOL_NOVERS(memcmp);
+
+EXPORT_SYMBOL(abs);
+
+EXPORT_SYMBOL(timer_interrupt);
+EXPORT_SYMBOL(irq_desc);
+void ppc_irq_dispatch_handler(struct pt_regs *, int);
+EXPORT_SYMBOL(ppc_irq_dispatch_handler);
+EXPORT_SYMBOL(get_wchan);
+EXPORT_SYMBOL(console_drivers);
+#ifdef CONFIG_XMON
+EXPORT_SYMBOL(xmon);
+#endif
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+extern void (*debugger)(struct pt_regs *regs);
+extern int (*debugger_bpt)(struct pt_regs *regs);
+extern int (*debugger_sstep)(struct pt_regs *regs);
+extern int (*debugger_iabr_match)(struct pt_regs *regs);
+extern int (*debugger_dabr_match)(struct pt_regs *regs);
+extern void (*debugger_fault_handler)(struct pt_regs *regs);
+
+EXPORT_SYMBOL(debugger);
+EXPORT_SYMBOL(debugger_bpt);
+EXPORT_SYMBOL(debugger_sstep);
+EXPORT_SYMBOL(debugger_iabr_match);
+EXPORT_SYMBOL(debugger_dabr_match);
+EXPORT_SYMBOL(debugger_fault_handler);
+#endif
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(atomic_dec_and_lock);
+#endif
+
+EXPORT_SYMBOL(tb_ticks_per_usec);
--- /dev/null
+/*
+ * proc_pmc.c
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity:
+ * 2001 : mikec : Created
+ * 2001/06/05 : engebret : Software event count support.
+ * End Change Activity
+ */
+
+#include <asm/proc_fs.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/HvCallXm.h>
+#include <asm/iSeries/IoHriMainStore.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/iSeries/LparData.h>
+
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <asm/pmc.h>
+#include <asm/uaccess.h>
+#include <asm/Naca.h>
+
+
+/* Currently selected PMC control mode; compared against the
+ * PMC_CONTROL_* values in the read handler below. */
+static int proc_pmc_control_mode = 0;
+
+static struct proc_dir_entry *proc_ppc64_root = NULL;
+static struct proc_dir_entry *proc_ppc64_pmc_root = NULL;
+static struct proc_dir_entry *proc_ppc64_pmc_system_root = NULL;
+static struct proc_dir_entry *proc_ppc64_pmc_cpu_root[NR_CPUS] = {NULL, };
+
+/* Explicitly initialise the lock to the unlocked state: a zero-filled
+ * static spinlock_t is not guaranteed to be unlocked on all
+ * configurations. */
+static spinlock_t proc_ppc64_lock = SPIN_LOCK_UNLOCKED;
+
+extern struct Naca *naca;
+
+int proc_ppc64_pmc_find_file(void *data);
+int proc_ppc64_pmc_read(char *page, char **start, off_t off,
+ int count, int *eof, char *buffer);
+int proc_ppc64_pmc_stab_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int proc_ppc64_pmc_htab_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int proc_ppc64_pmc_hw_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+
+static struct proc_dir_entry *pmc_proc_root = NULL;
+
+int proc_get_lpevents( char *page, char **start, off_t off, int count, int *eof, void *data);
+int proc_reset_lpevents( struct file *file, const char *buffer, unsigned long count, void *data);
+
+int proc_get_titanTod( char *page, char **start, off_t off, int count, int *eof, void *data);
+
+int proc_pmc_get_control( char *page, char **start, off_t off, int count, int *eof, void *data);
+
+int proc_pmc_set_control( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_mmcr0( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_mmcr1( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_mmcra( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc1( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc2( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc3( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc4( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc5( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc6( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc7( struct file *file, const char *buffer, unsigned long count, void *data);
+int proc_pmc_set_pmc8( struct file *file, const char *buffer, unsigned long count, void *data);
+
+
+/*
+ * Create the /proc/ppc64 tree:
+ * /proc/ppc64/pmc/system/{stab,htab,hardware}
+ * /proc/ppc64/pmc/cpuN/{stab,htab,hardware}
+ *
+ * Fix: the original early return on proc_mkdir() failure left
+ * proc_ppc64_lock held, deadlocking every later user of the lock.
+ */
+void proc_ppc64_init(void)
+{
+ unsigned long i;
+ struct proc_dir_entry *ent = NULL;
+ char buf[256];
+
+ printk("proc_ppc64: Creating /proc/ppc64/pmc\n");
+
+ /*
+ * Create the root, system, and cpu directories as follows:
+ * /proc/ppc64/pmc/system
+ * /proc/ppc64/pmc/cpu0
+ */
+ spin_lock(&proc_ppc64_lock);
+ proc_ppc64_root = proc_mkdir("ppc64", 0);
+ spin_unlock(&proc_ppc64_lock);
+ /* bail out only after releasing the lock */
+ if (!proc_ppc64_root) return;
+
+#ifdef CONFIG_PPC_EEH
+ eeh_init_proc(proc_ppc64_root);
+#endif
+
+ proc_ppc64_pmc_root = proc_mkdir("pmc", proc_ppc64_root);
+
+ proc_ppc64_pmc_system_root = proc_mkdir("system", proc_ppc64_pmc_root);
+ for (i = 0; i < naca->processorCount; i++) {
+ sprintf(buf, "cpu%ld", i);
+ proc_ppc64_pmc_cpu_root[i] = proc_mkdir(buf, proc_ppc64_pmc_root);
+ }
+
+
+ /* Create directories for the software counters. */
+ for (i = 0; i < naca->processorCount; i++) {
+ ent = create_proc_entry("stab", S_IRUGO | S_IWUSR,
+ proc_ppc64_pmc_cpu_root[i]);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = (void *)proc_ppc64_pmc_cpu_root[i];
+ /* NOTE(review): write_proc points at the read routine in
+ * all of the entries below -- confirm this is intended. */
+ ent->read_proc = (void *)proc_ppc64_pmc_stab_read;
+ ent->write_proc = (void *)proc_ppc64_pmc_stab_read;
+ }
+
+ ent = create_proc_entry("htab", S_IRUGO | S_IWUSR,
+ proc_ppc64_pmc_cpu_root[i]);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = (void *)proc_ppc64_pmc_cpu_root[i];
+ ent->read_proc = (void *)proc_ppc64_pmc_htab_read;
+ ent->write_proc = (void *)proc_ppc64_pmc_htab_read;
+ }
+ }
+
+ ent = create_proc_entry("stab", S_IRUGO | S_IWUSR,
+ proc_ppc64_pmc_system_root);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = (void *)proc_ppc64_pmc_system_root;
+ ent->read_proc = (void *)proc_ppc64_pmc_stab_read;
+ ent->write_proc = (void *)proc_ppc64_pmc_stab_read;
+ }
+
+ ent = create_proc_entry("htab", S_IRUGO | S_IWUSR,
+ proc_ppc64_pmc_system_root);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = (void *)proc_ppc64_pmc_system_root;
+ ent->read_proc = (void *)proc_ppc64_pmc_htab_read;
+ ent->write_proc = (void *)proc_ppc64_pmc_htab_read;
+ }
+
+ /* Create directories for the hardware counters. */
+ for (i = 0; i < naca->processorCount; i++) {
+ ent = create_proc_entry("hardware", S_IRUGO | S_IWUSR,
+ proc_ppc64_pmc_cpu_root[i]);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = (void *)proc_ppc64_pmc_cpu_root[i];
+ ent->read_proc = (void *)proc_ppc64_pmc_hw_read;
+ ent->write_proc = (void *)proc_ppc64_pmc_hw_read;
+ }
+ }
+
+ ent = create_proc_entry("hardware", S_IRUGO | S_IWUSR,
+ proc_ppc64_pmc_system_root);
+ if (ent) {
+ ent->nlink = 1;
+ ent->data = (void *)proc_ppc64_pmc_system_root;
+ ent->read_proc = (void *)proc_ppc64_pmc_hw_read;
+ ent->write_proc = (void *)proc_ppc64_pmc_hw_read;
+ }
+}
+
+/*
+ * Map a proc token (the directory proc_dir_entry stored in ->data)
+ * back to the 'file' it represents.
+ *
+ * Inputs: void * data: proc token
+ * Output: int : (0, ..., +N) = CPU number.
+ * -1 = System.
+ */
+int proc_ppc64_pmc_find_file(void *data)
+{
+ unsigned long token = (unsigned long)data;
+ int cpu;
+
+ if (token == (unsigned long)proc_ppc64_pmc_system_root)
+ return(-1);
+
+ for (cpu = 0; cpu < naca->processorCount; cpu++)
+ if (token == (unsigned long)proc_ppc64_pmc_cpu_root[cpu])
+ return(cpu);
+
+ /* On error, just default to a type of system. */
+ printk("proc_ppc64_pmc_find_file: failed to find file token.\n");
+ return(-1);
+}
+
+/*
+ * Copy a window of the preformatted text 'buffer' into the caller's
+ * page, honouring the read_proc (off, count, eof) contract.
+ * Returns the number of bytes placed in 'page' (0 at/after EOF).
+ */
+int
+proc_ppc64_pmc_read(char *page, char **start, off_t off,
+ int count, int *eof, char *buffer)
+{
+ int buffer_size, n;
+
+ if (count < 0) return 0;
+
+ /* counter-formatting helpers may return NULL; treat as empty */
+ if (buffer == NULL) {
+ *eof = 1;
+ return 0;
+ }
+
+ /* Check for read beyond EOF */
+ buffer_size = n = strlen(buffer);
+ if (off >= buffer_size) {
+ *eof = 1;
+ return 0;
+ }
+ /* clip to the bytes remaining past the seek offset */
+ if (n > (buffer_size - off)) n = buffer_size - off;
+
+ /* Never return more than was requested */
+ if (n > count) {
+ n = count;
+ } else {
+ *eof = 1;
+ }
+
+ memcpy(page, buffer + off, n);
+
+ *start = page;
+
+ return n;
+}
+
+/*
+ * read_proc handler for the per-cpu/system "stab" counter files.
+ * 'data' is the proc directory token identifying which cpu (or the
+ * system aggregate) is being read; serialised by proc_ppc64_lock.
+ */
+int
+proc_ppc64_pmc_stab_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int n, file;
+ char *buffer = NULL;
+
+ if (count < 0) return 0;
+ spin_lock(&proc_ppc64_lock);
+
+ /* Figure out which file is being request. */
+ file = proc_ppc64_pmc_find_file(data);
+
+ /* Update the counters and the text buffer representation. */
+ buffer = ppc64_pmc_stab(file);
+
+ /* Put the data into the requestor's buffer. */
+ n = proc_ppc64_pmc_read(page, start, off, count, eof, buffer);
+
+ spin_unlock(&proc_ppc64_lock);
+ return n;
+}
+
+/*
+ * read_proc handler for the per-cpu/system "htab" counter files;
+ * identical structure to the stab handler but backed by
+ * ppc64_pmc_htab().  Serialised by proc_ppc64_lock.
+ */
+int
+proc_ppc64_pmc_htab_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int n, file;
+ char *buffer = NULL;
+
+ if (count < 0) return 0;
+ spin_lock(&proc_ppc64_lock);
+
+ /* Figure out which file is being request. */
+ file = proc_ppc64_pmc_find_file(data);
+
+ /* Update the counters and the text buffer representation. */
+ buffer = ppc64_pmc_htab(file);
+
+ /* Put the data into the requestor's buffer. */
+ n = proc_ppc64_pmc_read(page, start, off, count, eof, buffer);
+
+ spin_unlock(&proc_ppc64_lock);
+ return n;
+}
+
+/*
+ * read_proc handler for the per-cpu/system "hardware" counter files;
+ * identical structure to the stab handler but backed by
+ * ppc64_pmc_hw().  Serialised by proc_ppc64_lock.
+ */
+int
+proc_ppc64_pmc_hw_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int n, file;
+ char *buffer = NULL;
+
+ if (count < 0) return 0;
+ spin_lock(&proc_ppc64_lock);
+
+ /* Figure out which file is being request. */
+ file = proc_ppc64_pmc_find_file(data);
+
+ /* Update the counters and the text buffer representation. */
+ buffer = ppc64_pmc_hw(file);
+
+ /* Put the data into the requestor's buffer. */
+ n = proc_ppc64_pmc_read(page, start, off, count, eof, buffer);
+
+ spin_unlock(&proc_ppc64_lock);
+ return n;
+}
+
+/*
+ * DRENG the remainder of these functions still need work ...
+ */
+/* Register the iSeries /proc entries under 'iSeries_proc':
+ * lpevents, titanTod, and pmc/control.  Each early return simply
+ * abandons the remaining registrations on allocation failure. */
+void pmc_proc_init(struct proc_dir_entry *iSeries_proc)
+{
+ struct proc_dir_entry *ent = NULL;
+
+ ent = create_proc_entry("lpevents", S_IFREG|S_IRUGO, iSeries_proc);
+ if (!ent) return;
+ ent->nlink = 1;
+ ent->data = (void *)0;
+ ent->read_proc = proc_get_lpevents;
+ ent->write_proc = proc_reset_lpevents;
+
+ ent = create_proc_entry("titanTod", S_IFREG|S_IRUGO, iSeries_proc);
+ if (!ent) return;
+ ent->nlink = 1;
+ ent->data = (void *)0;
+ ent->read_proc = proc_get_titanTod;
+ ent->write_proc = NULL;
+
+ pmc_proc_root = proc_mkdir("pmc", iSeries_proc);
+ if (!pmc_proc_root) return;
+
+ /* control is root-writable: writes switch the PMC mode */
+ ent = create_proc_entry("control", S_IFREG|S_IRUSR|S_IWUSR, pmc_proc_root);
+ if (!ent) return;
+ ent->nlink = 1;
+ ent->data = (void *)0;
+ ent->read_proc = proc_pmc_get_control;
+ ent->write_proc = proc_pmc_set_control;
+
+}
+
+/*
+ * Standard read_proc bookkeeping: given the total formatted length
+ * 'len', window it by (off, count), flag EOF when the caller will
+ * have consumed everything, and return the byte count to hand back.
+ */
+static int pmc_calc_metrics( char *page, char **start, off_t off, int count, int *eof, int len)
+{
+ int avail = len - off; /* bytes remaining past the seek offset */
+
+ if ( len <= off+count)
+ *eof = 1;
+ *start = page+off;
+ if (avail < 0)
+ avail = 0;
+ else if (avail > count)
+ avail = count;
+ return avail;
+}
+
+static char * lpEventTypes[9] = {
+ "Hypervisor\t\t",
+ "Machine Facilities\t",
+ "Session Manager\t",
+ "SPD I/O\t\t",
+ "Virtual Bus\t\t",
+ "PCI I/O\t\t",
+ "RIO I/O\t\t",
+ "Virtual Lan\t\t",
+ "Virtual I/O\t\t"
+ };
+
+
+/* /proc read handler: report LpEvent counts for queue 0, broken down
+ * by event type and by processor. */
+int proc_get_lpevents
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ unsigned i;
+ int len = 0;
+
+ len += sprintf( page+len, "LpEventQueue 0\n" );
+ len += sprintf( page+len, " events processed:\t%lu\n",
+ (unsigned long)xItLpQueue.xLpIntCount );
+ /* per-type counts; lpEventTypes[] has exactly 9 entries */
+ for (i=0; i<9; ++i) {
+ len += sprintf( page+len, " %s %10lu\n",
+ lpEventTypes[i],
+ (unsigned long)xItLpQueue.xLpIntCountByType[i] );
+ }
+ len += sprintf( page+len, "\n events processed by processor:\n" );
+ for (i=0; i<naca->processorCount; ++i) {
+ len += sprintf( page+len, " CPU%02d %10u\n",
+ i, xPaca[i].lpEvent_count );
+ }
+
+ return pmc_calc_metrics( page, start, off, count, eof, len );
+
+}
+
+/* write_proc handler for lpevents: resetting the counts is not
+ * implemented yet, so accept and discard the input. */
+int proc_reset_lpevents( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ return count;
+}
+
+/* Baseline samples recorded on the first read so later reads can
+ * report elapsed titan-TOD vs. timebase deltas. */
+static unsigned long startTitan = 0;
+static unsigned long startTb = 0;
+
+
+/* /proc read handler: sample the titan TOD clock and the CPU
+ * timebase, and after the first read derive an observed
+ * tb_ticks_per_jiffy from the two deltas. */
+int proc_get_titanTod
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+ unsigned long tb0, titan_tod;
+
+ tb0 = get_tb();
+ titan_tod = HvCallXm_loadTod();
+
+ len += sprintf( page+len, "Titan\n" );
+ len += sprintf( page+len, " time base = %016lx\n", tb0 );
+ len += sprintf( page+len, " titan tod = %016lx\n", titan_tod );
+ len += sprintf( page+len, " xProcFreq = %016x\n", xIoHriProcessorVpd[0].xProcFreq );
+ len += sprintf( page+len, " xTimeBaseFreq = %016x\n", xIoHriProcessorVpd[0].xTimeBaseFreq );
+ len += sprintf( page+len, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy );
+ len += sprintf( page+len, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec );
+
+ if ( !startTitan ) {
+ /* first call: just record the baseline samples */
+ startTitan = titan_tod;
+ startTb = tb0;
+ }
+ else {
+ /* >> 12: titan TOD apparently counts in 1/4096 uSec units
+ * (inferred from the shift) -- confirm against HvCallXm docs */
+ unsigned long titan_usec = (titan_tod - startTitan) >> 12;
+ unsigned long tb_ticks = (tb0 - startTb);
+ unsigned long titan_jiffies = titan_usec / (1000000/HZ);
+ unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
+ unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec;
+ unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
+ unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
+ unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
+ unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec;
+ unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec;
+
+ len += sprintf( page+len, " titan elapsed = %lu uSec\n", titan_usec);
+ len += sprintf( page+len, " tb elapsed = %lu ticks\n", tb_ticks);
+ len += sprintf( page+len, " titan jiffies = %lu.%04lu \n", titan_jiffies, titan_jiff_rem_usec );
+ len += sprintf( page+len, " tb jiffies = %lu.%04lu\n", tb_jiffies, tb_jiff_rem_usec );
+ len += sprintf( page+len, " new tb_ticks_per_jiffy = %lu\n", new_tb_ticks_per_jiffy );
+
+ }
+
+ return pmc_calc_metrics( page, start, off, count, eof, len );
+}
+
+/* /proc read handler: dump the performance monitor counters, formatted
+ * according to the mode last selected via proc_pmc_set_control
+ * (CPI or TLB).  Any other mode produces no counter output.
+ */
+int proc_pmc_get_control
+(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+
+ if ( proc_pmc_control_mode == PMC_CONTROL_CPI ) {
+ /* Counter assignments match the programming done in proc_pmc_cpi() */
+ unsigned long mach_cycles = mfspr( PMC5 );
+ unsigned long inst_complete = mfspr( PMC4 );
+ unsigned long inst_dispatch = mfspr( PMC3 );
+ unsigned long thread_active_run = mfspr( PMC1 );
+ unsigned long thread_active = mfspr( PMC2 );
+ unsigned long cpi = 0;
+ unsigned long cpithou = 0;
+ unsigned long remain;
+
+ if ( inst_complete ) {
+ /* CPI with three decimal places, computed in integer
+ * arithmetic; the >1000000 branch avoids overflow of
+ * remain * 1000 for large counts. */
+ cpi = thread_active_run / inst_complete;
+ remain = thread_active_run % inst_complete;
+ if ( inst_complete > 1000000 )
+ cpithou = remain / ( inst_complete / 1000 );
+ else
+ cpithou = ( remain * 1000 ) / inst_complete;
+ }
+ len += sprintf( page+len, "PMC CPI Mode\nRaw Counts\n" );
+ len += sprintf( page+len, "machine cycles : %12lu\n", mach_cycles );
+ len += sprintf( page+len, "thread active cycles : %12lu\n\n", thread_active );
+
+ len += sprintf( page+len, "instructions completed : %12lu\n", inst_complete );
+ len += sprintf( page+len, "instructions dispatched : %12lu\n", inst_dispatch );
+ len += sprintf( page+len, "thread active run cycles : %12lu\n", thread_active_run );
+
+ len += sprintf( page+len, "thread active run cycles/instructions completed\n" );
+ len += sprintf( page+len, "CPI = %lu.%03lu\n", cpi, cpithou );
+
+ }
+ else if ( proc_pmc_control_mode == PMC_CONTROL_TLB ) {
+ /* Counter assignments match the programming done in proc_pmc_tlb() */
+ len += sprintf( page+len, "PMC TLB Mode\n" );
+ len += sprintf( page+len, "I-miss count : %12lu\n", mfspr( PMC1 ) );
+ len += sprintf( page+len, "I-miss latency : %12lu\n", mfspr( PMC2 ) );
+ len += sprintf( page+len, "D-miss count : %12lu\n", mfspr( PMC3 ) );
+ len += sprintf( page+len, "D-miss latency : %12lu\n", mfspr( PMC4 ) );
+ len += sprintf( page+len, "IERAT miss count : %12lu\n", mfspr( PMC5 ) );
+ len += sprintf( page+len, "D-reference count : %12lu\n", mfspr( PMC6 ) );
+ len += sprintf( page+len, "miss PTEs searched : %12lu\n", mfspr( PMC7 ) );
+ len += sprintf( page+len, "miss >8 PTEs searched : %12lu\n", mfspr( PMC8 ) );
+ }
+ /* IMPLEMENT ME */
+ return pmc_calc_metrics( page, start, off, count, eof, len );
+}
+
+/* Parse an unsigned integer from a user-written string.
+ * A leading "0x" or "0X" (only recognized when count >= 3) selects
+ * base 16; otherwise base 10 is used.  Characters that are not valid
+ * digits for the selected base neither contribute to the value nor
+ * terminate the scan -- they are simply skipped, so trailing newlines
+ * or junk are tolerated.
+ */
+unsigned long proc_pmc_conv_int( const char *buf, unsigned count )
+{
+ const char *s = buf;
+ unsigned base = 10;
+ unsigned long val = 0;
+ unsigned i;
+
+ if ( count >= 3 && s[0] == '0' &&
+ ( s[1] == 'x' || s[1] == 'X' ) ) {
+ s += 2;
+ count -= 2;
+ base = 16;
+ }
+
+ for ( i = 0; i < count; ++i ) {
+ char c = *s++;
+ unsigned digit;
+
+ if ( c >= '0' && c <= '9' )
+ digit = c - '0';
+ else if ( base == 16 && c >= 'a' && c <= 'f' )
+ digit = c - 'a' + 10;
+ else if ( base == 16 && c >= 'A' && c <= 'F' )
+ digit = c - 'A' + 10;
+ else
+ continue; /* ignore non-digit characters */
+
+ val = val * base + digit;
+ }
+
+ return val;
+}
+
+/* Freeze all PMCs by setting the MMCR0 freeze-counters bit (bit 0,
+ * 0x80000000), preserving all other MMCR0 settings. */
+static inline void proc_pmc_stop(void)
+{
+ /* Freeze all counters, leave everything else alone */
+ mtspr( MMCR0, mfspr( MMCR0 ) | 0x80000000 );
+}
+
+/* Unfreeze all PMCs by clearing the MMCR0 freeze-counters bit,
+ * preserving all other MMCR0 settings. */
+static inline void proc_pmc_start(void)
+{
+ /* Unfreeze all counters, leave everything else alone */
+ mtspr( MMCR0, mfspr( MMCR0 ) & ~0x80000000 );
+
+}
+
+/* Zero all eight PMC registers.  Caller is expected to have frozen the
+ * counters (proc_pmc_stop) first, otherwise they keep counting while
+ * being cleared. */
+static inline void proc_pmc_reset(void)
+{
+ /* Clear all the PMCs to zeros
+ * Assume a "stop" has already frozen the counters
+ * Clear all the PMCs
+ */
+ mtspr( PMC1, 0 );
+ mtspr( PMC2, 0 );
+ mtspr( PMC3, 0 );
+ mtspr( PMC4, 0 );
+ mtspr( PMC5, 0 );
+ mtspr( PMC6, 0 );
+ mtspr( PMC7, 0 );
+ mtspr( PMC8, 0 );
+
+}
+
+/* Program the performance monitor for CPI (cycles-per-instruction)
+ * measurement: freeze, clear, select the events listed below, then
+ * start all counters.  proc_pmc_get_control() reads the results. */
+static inline void proc_pmc_cpi(void)
+{
+ /* Configure the PMC registers to count cycles and instructions */
+ /* so we can compute cpi */
+ /*
+ * MMCRA[30] = 1 Don't count in wait state (CTRL[31]=0)
+ * MMCR0[6] = 1 Freeze counters when any overflow
+ * MMCR0[19:25] = 0x01 PMC1 counts Thread Active Run Cycles
+ * MMCR0[26:31] = 0x05 PMC2 counts Thread Active Cycles
+ * MMCR1[0:4] = 0x07 PMC3 counts Instructions Dispatched
+ * MMCR1[5:9] = 0x03 PMC4 counts Instructions Completed
+ * MMCR1[10:14] = 0x06 PMC5 counts Machine Cycles
+ *
+ */
+
+ proc_pmc_control_mode = PMC_CONTROL_CPI;
+
+ /* Indicate to hypervisor that we are using the PMCs.
+ * SPRG3 holds this cpu's Paca pointer (as the cast shows). */
+ ((struct Paca *)mfspr(SPRG3))->xLpPacaPtr->xPMCRegsInUse = 1;
+
+ /* Freeze all counters */
+ mtspr( MMCR0, 0x80000000 );
+ mtspr( MMCR1, 0x00000000 );
+
+ /* Clear all the PMCs */
+ mtspr( PMC1, 0 );
+ mtspr( PMC2, 0 );
+ mtspr( PMC3, 0 );
+ mtspr( PMC4, 0 );
+ mtspr( PMC5, 0 );
+ mtspr( PMC6, 0 );
+ mtspr( PMC7, 0 );
+ mtspr( PMC8, 0 );
+
+ /* Freeze counters in Wait State (CTRL[31]=0) */
+ mtspr( MMCRA, 0x00000002 );
+
+ /* PMC3<-0x07, PMC4<-0x03, PMC5<-0x06 */
+ mtspr( MMCR1, 0x38cc0000 );
+
+ /* Order the event selection above before unfreezing below */
+ mb();
+
+ /* PMC1<-0x01, PMC2<-0x05
+ * Start all counters
+ */
+ mtspr( MMCR0, 0x02000045 );
+
+}
+
+/* Program the performance monitor for TLB-miss measurement: freeze,
+ * clear, select the TLB event group, then start all counters.
+ * proc_pmc_get_control() reads the results. */
+static inline void proc_pmc_tlb(void)
+{
+ /* Configure the PMC registers to count tlb misses */
+ /*
+ * MMCR0[6] = 1 Freeze counters when any overflow
+ * MMCR0[19:25] = 0x55 Group count
+ * PMC1 counts I misses
+ * PMC2 counts I miss duration (latency)
+ * PMC3 counts D misses
+ * PMC4 counts D miss duration (latency)
+ * PMC5 counts IERAT misses
+ * PMC6 counts D references (including PMC7)
+ * PMC7 counts miss PTEs searched
+ * PMC8 counts miss >8 PTEs searched
+ *
+ */
+
+ proc_pmc_control_mode = PMC_CONTROL_TLB;
+
+ /* Indicate to hypervisor that we are using the PMCs.
+ * SPRG3 holds this cpu's Paca pointer (as the cast shows). */
+ ((struct Paca *)mfspr(SPRG3))->xLpPacaPtr->xPMCRegsInUse = 1;
+
+ /* Freeze all counters */
+ mtspr( MMCR0, 0x80000000 );
+ mtspr( MMCR1, 0x00000000 );
+
+ /* Clear all the PMCs */
+ mtspr( PMC1, 0 );
+ mtspr( PMC2, 0 );
+ mtspr( PMC3, 0 );
+ mtspr( PMC4, 0 );
+ mtspr( PMC5, 0 );
+ mtspr( PMC6, 0 );
+ mtspr( PMC7, 0 );
+ mtspr( PMC8, 0 );
+
+ mtspr( MMCRA, 0x00000000 );
+
+ /* Order the event selection above before unfreezing below */
+ mb();
+
+ /* PMC1<-0x55
+ * Start all counters
+ */
+ mtspr( MMCR0, 0x02001540 );
+
+}
+
+/* /proc write handler: dispatch on a command word written by the user
+ * ("stop", "start", "reset", "cpi", "tlb").  Unknown input is ignored.
+ * NOTE(review): buffer is compared in place; if this proc interface
+ * passes a userspace pointer it should be copied with copy_from_user
+ * first, and count should be checked against the compare lengths --
+ * verify against the proc_fs registration for this entry.
+ */
+int proc_pmc_set_control( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ if ( ! strncmp( buffer, "stop", 4 ) )
+ proc_pmc_stop();
+ else if ( ! strncmp( buffer, "start", 5 ) )
+ proc_pmc_start();
+ else if ( ! strncmp( buffer, "reset", 5 ) )
+ proc_pmc_reset();
+ else if ( ! strncmp( buffer, "cpi", 3 ) )
+ proc_pmc_cpi();
+ else if ( ! strncmp( buffer, "tlb", 3 ) )
+ proc_pmc_tlb();
+
+ /* IMPLEMENT ME */
+ return count;
+}
+
+/* /proc write handler: parse an integer and store it into MMCR0.
+ * The performance-monitor-interrupt enable bit (0x04000000) is always
+ * masked off.  The hypervisor in-use flag is set when any bit other
+ * than the freeze bit (0x80000000) is set, cleared otherwise. */
+int proc_pmc_set_mmcr0( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ v = v & ~0x04000000; /* Don't allow interrupts for now */
+ if ( v & ~0x80000000 ) /* Inform hypervisor we are using PMCs */
+ ((struct Paca *)mfspr(SPRG3))->xLpPacaPtr->xPMCRegsInUse = 1;
+ else
+ ((struct Paca *)mfspr(SPRG3))->xLpPacaPtr->xPMCRegsInUse = 0;
+ mtspr( MMCR0, v );
+
+ return count;
+}
+
+/* /proc write handler: parse an integer and store it into MMCR1. */
+int proc_pmc_set_mmcr1( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( MMCR1, v );
+
+ return count;
+}
+
+/* /proc write handler: parse an integer and store it into MMCRA,
+ * always masking off bit 0x00008000 (interrupt enable). */
+int proc_pmc_set_mmcra( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ v = v & ~0x00008000; /* Don't allow interrupts for now */
+ mtspr( MMCRA, v );
+
+ return count;
+}
+
+
+/* The eight handlers below are identical except for the target SPR:
+ * each parses an integer from the written data (proc_pmc_conv_int)
+ * and stores it directly into the corresponding PMC register. */
+int proc_pmc_set_pmc1( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC1, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc2( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC2, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc3( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC3, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc4( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC4, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc5( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC5, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc6( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC6, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc7( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC7, v );
+
+ return count;
+}
+
+int proc_pmc_set_pmc8( struct file *file, const char *buffer, unsigned long count, void *data )
+{
+ unsigned long v;
+ v = proc_pmc_conv_int( buffer, count );
+ mtspr( PMC8, v );
+
+ return count;
+}
+
--- /dev/null
+/*
+ * linux/arch/ppc/kernel/process.c
+ *
+ * Derived from "arch/i386/kernel/process.c"
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
+ * Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/prom.h>
+#include <asm/ppcdebug.h>
+#include <asm/machdep.h>
+#include <asm/iSeries/HvCallHpt.h>
+#include <asm/Naca.h>
+
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs);
+extern unsigned long _get_SP(void);
+
+/* Task whose FP state currently lives in the FPU (lazy FP switching). */
+struct task_struct *last_task_used_math = NULL;
+
+/* mm used for ioremap'd mappings; GNU named-field initializer syntax. */
+struct mm_struct ioremap_mm = { pgd : ioremap_dir
+ ,page_table_lock : SPIN_LOCK_UNLOCKED };
+
+/* System.map image; presumably loaded elsewhere at boot -- not set here. */
+char *sysmap = NULL;
+unsigned long sysmap_size = 0;
+
+extern char __toc_start;
+
+/* Make the FPU usable by kernel code.  On SMP, flush the current
+ * task's live FP state (or just enable FP via giveup_fpu(NULL));
+ * on UP, flush whichever task last owned the FPU. */
+void
+enable_kernel_fp(void)
+{
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+ giveup_fpu(current);
+ else
+ giveup_fpu(NULL); /* just enables FP for kernel */
+#else
+ giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+
+/* Copy the current task's floating point registers into *fpregs for an
+ * ELF core dump.  If the FP state is live in hardware (MSR_FP set),
+ * flush it to the thread_struct first.  Returns 1: FP state present.
+ */
+int
+dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+{
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+ /* fixed: source operand was mangled by HTML-entity corruption
+ * ("&current" had become a garbled character sequence) */
+ memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs));
+ return 1;
+}
+
+/* Context-switch helper: with interrupts disabled, optionally flush the
+ * outgoing task's FP state (SMP only) and call the low-level _switch()
+ * from current's thread_struct to the new task's. */
+void
+_switch_to(struct task_struct *prev, struct task_struct *new)
+{
+ struct thread_struct *new_thread, *old_thread;
+ unsigned long s;
+
+ __save_flags(s);
+ __cli();
+
+#ifdef CONFIG_SMP
+ /* avoid complexity of lazy save/restore of fpu
+ * by just saving it every time we switch out if
+ * this task used the fpu during the last quantum.
+ *
+ * If it tries to use the fpu again, it'll trap and
+ * reload its fp regs. So we don't have to do a restore
+ * every switch, just a save.
+ * -- Cort
+ */
+ if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) )
+ giveup_fpu(prev);
+#endif /* CONFIG_SMP */
+
+ new_thread = &new->thread;
+ /* fixed: "&current" had been mangled by HTML-entity corruption */
+ old_thread = &current->thread;
+ _switch(old_thread, new_thread);
+ __restore_flags(s);
+}
+
+/* Dump a register frame to the console: NIP/XER/LR/trap, decoded MSR
+ * bits, current task info, then all 32 GPRs four per line.  GPRs are
+ * fetched with __get_user since regs may point at a user frame; the
+ * dump stops silently on a faulting access. */
+void show_regs(struct pt_regs * regs)
+{
+ int i;
+
+ printk("NIP: %016lX XER: %016lX LR: %016lX REGS: %p TRAP: %04lx %s\n",
+ regs->nip, regs->xer, regs->link, regs,regs->trap, print_tainted());
+ printk("MSR: %016lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
+ regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
+ regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
+ regs->msr&MSR_IR ? 1 : 0,
+ regs->msr&MSR_DR ? 1 : 0);
+ printk("TASK = %p[%d] '%s' ",
+ current, current->pid, current->comm);
+ printk("Last syscall: %ld ", current->thread.last_syscall);
+ printk("\nlast math %p ", last_task_used_math);
+
+#ifdef CONFIG_SMP
+ /* printk(" CPU: %d last CPU: %d", current->processor,current->last_processor); */
+#endif /* CONFIG_SMP */
+
+ printk("\n");
+ for (i = 0; i < 32; i++)
+ {
+ long r;
+ if ((i % 4) == 0)
+ {
+ printk("GPR%02d: ", i);
+ }
+
+ if ( __get_user(r, &(regs->gpr[i])) )
+ return;
+
+ printk("%016lX ", r);
+ if ((i % 4) == 3)
+ {
+ printk("\n");
+ }
+ }
+}
+
+/* Thread teardown: drop lazy-FPU ownership if this task holds it. */
+void exit_thread(void)
+{
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+}
+
+/* Called on exec: same FPU-ownership drop as exit_thread. */
+void flush_thread(void)
+{
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+}
+
+/* No per-thread resources to free on this architecture. */
+void
+release_thread(struct task_struct *t)
+{
+}
+
+/*
+ * Copy a thread..
+ *
+ * Build the child's kernel stack: a copy of the parent's user register
+ * frame (with gpr[3]=0 so the child sees fork() return 0) plus a fake
+ * kernel frame that makes the child resume in ret_from_fork.  Copies
+ * the parent's FP state.  Always returns 0.
+ */
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ unsigned long msr;
+ struct pt_regs *childregs, *kregs;
+ extern void ret_from_fork(void);
+
+ /* XXX get rid of the -2 Anton */
+
+ /* Copy registers */
+ childregs = ((struct pt_regs *)
+ ((unsigned long)p->thread_info + THREAD_SIZE
+ - STACK_FRAME_OVERHEAD)) - 2;
+ *childregs = *regs;
+ childregs->gpr[3] = 0; /* Result from fork() */
+ p->thread.regs = childregs;
+ p->thread.ksp = (unsigned long) childregs - STACK_FRAME_OVERHEAD;
+ p->thread.ksp -= sizeof(struct pt_regs ) + STACK_FRAME_OVERHEAD;
+ kregs = (struct pt_regs *)(p->thread.ksp + STACK_FRAME_OVERHEAD);
+ /* The PPC64 compiler makes use of a TOC to contain function
+ * pointers. The function (ret_from_except) is actually a pointer
+ * to the TOC entry. The first entry is a pointer to the actual
+ * function.
+ */
+ kregs->nip = *((unsigned long *)ret_from_fork);
+ asm volatile("mfmsr %0" : "=r" (msr):);
+ kregs->msr = msr;
+ kregs->gpr[1] = (unsigned long)childregs - STACK_FRAME_OVERHEAD;
+ kregs->gpr[2] = (((unsigned long)&__toc_start) + 0x8000);
+
+ if (usp >= (unsigned long) regs) {
+ /* Stack is in kernel space - must adjust */
+ childregs->gpr[1] = (unsigned long)(childregs + 1);
+ *((unsigned long *) childregs->gpr[1]) = 0;
+ childregs->gpr[13] = (unsigned long) p;
+ } else {
+ /* Provided stack is in user space */
+ childregs->gpr[1] = usp;
+ }
+ p->thread.last_syscall = -1;
+
+ /*
+ * copy fpu info - assume lazy fpu switch now always
+ * -- Cort
+ */
+ if (regs->msr & MSR_FP) {
+ giveup_fpu(current);
+ childregs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ }
+ /* fixed: "&current" had been mangled by HTML-entity corruption */
+ memcpy(&p->thread.fpr, &current->thread.fpr, sizeof(p->thread.fpr));
+ p->thread.fpscr = current->thread.fpscr;
+
+ return 0;
+}
+
+/*
+ * Set up a thread for executing a new program
+ *
+ * nip is the address of an ELF function descriptor: word 0 is the real
+ * entry point, word 1 the TOC value.  Both are read with __get_user
+ * since the descriptor lives in user memory.  Clears the GPRs and the
+ * ctr/link/xer/ccr group, points gpr[1] at the new stack, and drops
+ * any lazy FP ownership held by this task.
+ */
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
+{
+ /* NIP is *really* a pointer to the function descriptor for
+ * the elf _start routine. The first entry in the function
+ * descriptor is the entry address of _start and the second
+ * entry is the TOC value we need to use.
+ */
+ unsigned long *entry = (unsigned long *)nip;
+ unsigned long *toc = entry + 1;
+
+
+ set_fs(USER_DS);
+ memset(regs->gpr, 0, sizeof(regs->gpr));
+ /* fixed: "&regs" had been mangled by HTML-entity corruption */
+ memset(&regs->ctr, 0, 4 * sizeof(regs->ctr));
+ __get_user(regs->nip, entry);
+ regs->gpr[1] = sp;
+ __get_user(regs->gpr[2], toc);
+ regs->msr = MSR_USER64;
+ if (last_task_used_math == current)
+ last_task_used_math = NULL; /* was "= 0"; NULL for consistency */
+ current->thread.fpscr = 0;
+}
+
+/* clone(2) entry: flags arrive in p1, the child stack defaults to the
+ * caller's current stack pointer (gpr[1]). */
+asmlinkage int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
+ struct pt_regs *regs)
+{
+ unsigned long clone_flags = p1;
+ int res;
+
+ res = do_fork(clone_flags, regs->gpr[1], regs, 0);
+#ifdef CONFIG_SMP
+ /* When we clone the idle task we keep the same pid but
+ * the return value of 0 for both causes problems.
+ * -- Cort
+ */
+ if ((current->pid == 0) && (current == &init_task))
+ res = 1;
+#endif /* CONFIG_SMP */
+
+ return res;
+}
+
+/* fork(2) entry: plain do_fork with SIGCHLD, reusing the caller's
+ * stack pointer for the child. */
+asmlinkage int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
+ struct pt_regs *regs)
+{
+ int res;
+
+ res = do_fork(SIGCHLD, regs->gpr[1], regs, 0);
+
+#ifdef CONFIG_SMP
+ /* When we clone the idle task we keep the same pid but
+ * the return value of 0 for both causes problems.
+ * -- Cort
+ */
+ if ((current->pid == 0) && (current == &init_task))
+ res = 1;
+#endif /* CONFIG_SMP */
+
+ return res;
+}
+
+/* vfork(2) entry: do_fork with the VFORK/VM/SIGCHLD combination. */
+asmlinkage int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
+ struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
+}
+
+/* execve(2) entry: a0=path, a1=argv, a2=envp.  Flushes live FP state
+ * before the exec and clears the single-step ptrace flag on success.
+ * Returns 0 or a negative errno (including PTR_ERR from getname). */
+asmlinkage int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4, unsigned long a5,
+ struct pt_regs *regs)
+{
+ int error;
+ char * filename;
+
+ filename = getname((char *) a0);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+
+ error = do_execve(filename, (char **) a1, (char **) a2, regs);
+
+ if (error == 0)
+ current->ptrace &= ~PT_DTRACE;
+ putname(filename);
+
+out:
+ return error;
+}
+
+/* Allocate an 8-page hardware interrupt stack per secondary cpu and
+ * record it in that cpu's Paca; then (except on POWER4, where the
+ * pages may be large-pte mapped) write-protect the lowest page of
+ * every cpu's stack as an overflow guard.  Panics on allocation
+ * failure.  Note cpu 0's stack is not allocated here (loop from i=1);
+ * presumably it is set up earlier in boot -- verify. */
+void initialize_paca_hardware_interrupt_stack(void)
+{
+ extern struct Naca *naca;
+
+ int i;
+ unsigned long stack;
+ unsigned long end_of_stack =0;
+
+ for (i=1; i < naca->processorCount; i++) {
+ /* Carve out storage for the hardware interrupt stack */
+ stack = __get_free_pages(GFP_KERNEL, get_order(8*PAGE_SIZE));
+
+ if ( !stack ) {
+ printk("ERROR, cannot find space for hardware stack.\n");
+ panic(" no hardware stack ");
+ }
+
+
+ /* Store the stack value in the PACA for the processor */
+ xPaca[i].xHrdIntStack = stack + (8*PAGE_SIZE) - STACK_FRAME_OVERHEAD;
+ xPaca[i].xHrdIntCount = 0;
+
+ }
+
+ /*
+ * __get_free_pages() might give us a page > KERNBASE+256M which
+ * is mapped with large ptes so we can't set up the guard page.
+ */
+ if (__is_processor(PV_POWER4))
+ return;
+
+ for (i=0; i < naca->processorCount; i++) {
+ /* set page at the top of stack to be protected - prevent overflow */
+ end_of_stack = xPaca[i].xHrdIntStack - (8*PAGE_SIZE - STACK_FRAME_OVERHEAD);
+ ppc_md.hpte_updateboltedpp(PP_RXRX,end_of_stack);
+ }
+}
+
+extern char _stext[], _etext[];
+
+/* Resolve a kernel-text address to "name+0xoff" using the compiler's
+ * traceback tables: scan forward from p for the zero word that marks
+ * the end of a function, then decode the traceback flags that follow
+ * it to find the function name and code length.  Writes at most
+ * buflen bytes into buf ("Unknown" if p is outside the text section
+ * or no name is found) and returns buf. */
+char * ppc_find_proc_name( unsigned * p, char * buf, unsigned buflen )
+{
+ unsigned long tb_flags;
+ unsigned short name_len;
+ unsigned long tb_start, code_start, code_ptr, code_offset;
+ unsigned code_len;
+ strcpy( buf, "Unknown" );
+ code_ptr = (unsigned long)p;
+ code_offset = 0;
+ if ( ( (unsigned long)p >= (unsigned long)_stext ) && ( (unsigned long)p <= (unsigned long)_etext ) ) {
+ while ( (unsigned long)p <= (unsigned long)_etext ) {
+ if ( *p == 0 ) {
+ tb_start = (unsigned long)p;
+ ++p; /* Point to traceback flags */
+ tb_flags = *((unsigned long *)p);
+ p += 2; /* Skip over traceback flags */
+ if ( tb_flags & TB_NAME_PRESENT ) {
+ if ( tb_flags & TB_PARMINFO )
+ ++p; /* skip over parminfo data */
+ if ( tb_flags & TB_HAS_TBOFF ) {
+ code_len = *p; /* get code length */
+ code_start = tb_start - code_len;
+ code_offset = code_ptr - code_start + 1;
+ /* offsets over 1MB mean we matched the
+ * wrong function's table -- give up */
+ if ( code_offset > 0x100000 )
+ break;
+ ++p; /* skip over code size */
+ }
+ name_len = *((unsigned short *)p);
+ /* reserve ~20 bytes for the "+0x..." suffix */
+ if ( name_len > (buflen-20) )
+ name_len = buflen-20;
+ memcpy( buf, ((char *)p)+2, name_len );
+ buf[name_len] = 0;
+ if ( code_offset )
+ sprintf( buf+name_len, "+0x%lx", code_offset-1 );
+ }
+ break;
+ }
+ ++p;
+ }
+ }
+ return buf;
+}
+
+/* Walk a stack-frame chain and print each saved LR (frame slot [2])
+ * with its symbolized name.  Reads go through __get_user so a bad
+ * frame pointer terminates the walk instead of faulting; the walk is
+ * also capped at ~32 frames. */
+void
+print_backtrace(unsigned long *sp)
+{
+ int cnt = 0;
+ unsigned long i;
+ char name_buf[256];
+
+ printk("Call backtrace: \n");
+ while (sp) {
+ if (__get_user( i, &sp[2] ))
+ break;
+ printk("%016lX ", i);
+ printk("%s\n", ppc_find_proc_name( (unsigned *)i, name_buf, 256 ));
+ if (cnt > 32) break;
+ if (__get_user(sp, (unsigned long **)sp))
+ break;
+ }
+ printk("\n");
+}
+
+/*
+ * These bracket the sleeping functions..
+ */
+extern void scheduling_functions_start_here(void);
+extern void scheduling_functions_end_here(void);
+#define first_sched (*(unsigned long *)scheduling_functions_start_here)
+#define last_sched (*(unsigned long *)scheduling_functions_end_here)
+
+/* Find where a sleeping task is blocked: walk its saved stack frames
+ * (up to 16) and return the first saved LR (frame offset 16) that lies
+ * outside the scheduler text range, truncated to 32 bits.  Returns 0
+ * for the current/running task or if the walk leaves the task's stack. */
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ip, sp;
+ unsigned long stack_page = (unsigned long)p->thread_info;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ sp = p->thread.ksp;
+ do {
+ sp = *(unsigned long *)sp;
+ if (sp < (stack_page + sizeof(struct thread_struct)) ||
+ sp >= (stack_page + THREAD_SIZE))
+ return 0;
+ if (count > 0) {
+ ip = *(unsigned long *)(sp + 16);
+ if (ip < first_sched || ip >= last_sched)
+ return (ip & 0xFFFFFFFF);
+ }
+ } while (count++ < 16);
+ return 0;
+}
+
+/* Print a call trace for a (non-running) task by walking its saved
+ * stack frames, bounded to the task's own stack pages and at most 16
+ * frames; the first frame's LR is skipped (count > 0). */
+void show_trace_task(struct task_struct *p)
+{
+ unsigned long ip, sp;
+ unsigned long stack_page = (unsigned long)p->thread_info;
+ int count = 0;
+
+ if (!p)
+ return;
+
+ printk("Call Trace: ");
+ sp = p->thread.ksp;
+ do {
+ sp = *(unsigned long *)sp;
+ if (sp < (stack_page + sizeof(struct thread_struct)) ||
+ sp >= (stack_page + THREAD_SIZE))
+ break;
+ if (count > 0) {
+ ip = *(unsigned long *)(sp + 16);
+ printk("[%016lx] ", ip);
+ }
+ } while (count++ < 16);
+ printk("\n");
+}
--- /dev/null
+/*
+ *
+ *
+ * Procedures for interfacing to Open Firmware.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#if 0
+#define DEBUG_YABOOT
+#endif
+
+#if 0
+#define DEBUG_PROM
+#endif
+
+#include <stdarg.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/blk.h>
+
+/* Debug aid: temporarily swap the prom entry point for a yaboot
+ * callback, invoke it via enter_prom, then restore the real entry.
+ * Compiles away entirely unless DEBUG_YABOOT is defined. */
+#ifdef DEBUG_YABOOT
+#define call_yaboot(FUNC,...) \
+ do { \
+ if (FUNC) { \
+ struct prom_t *_prom = PTRRELOC(&prom); \
+ unsigned long prom_entry = _prom->entry;\
+ _prom->entry = (unsigned long)(FUNC); \
+ enter_prom(__VA_ARGS__); \
+ _prom->entry = prom_entry; \
+ } \
+ } while (0)
+#else
+#define call_yaboot(FUNC,...) do { ; } while (0)
+#endif
+
+#include <asm/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/bitops.h>
+#include <asm/Naca.h>
+#include <asm/pci.h>
+#include "open_pic.h"
+#include <asm/bootinfo.h>
+#include <asm/ppcdebug.h>
+
+#ifdef CONFIG_FB
+#include <asm/linux_logo.h>
+#endif
+
+extern char _end[];
+
+/*
+ * prom_init() is called very early on, before the kernel text
+ * and data have been mapped to KERNELBASE. At this point the code
+ * is running at whatever address it has been loaded at, so
+ * references to extern and static variables must be relocated
+ * explicitly. The procedure reloc_offset() returns the address
+ * we're currently running at minus the address we were linked at.
+ * (Note that strings count as static variables.)
+ *
+ * Because OF may have mapped I/O devices into the area starting at
+ * KERNELBASE, particularly on CHRP machines, we can't safely call
+ * OF once the kernel has been mapped to KERNELBASE. Therefore all
+ * OF calls should be done within prom_init(), and prom_init()
+ * and all routines called within it must be careful to relocate
+ * references as necessary.
+ *
+ * Note that the bss is cleared *after* prom_init runs, so we have
+ * to make sure that any static or extern variables it accesses
+ * are put in the data segment.
+ */
+
+
+/* Report a fatal condition via the OF console (file:line) and trap by
+ * executing an illegal instruction.  Strings go through RELOC()
+ * because this runs before the kernel is relocated. */
+#define PROM_BUG() do { \
+ prom_print(RELOC("kernel BUG at ")); \
+ prom_print(RELOC(__FILE__)); \
+ prom_print(RELOC(":")); \
+ prom_print_hex(__LINE__); \
+ prom_print(RELOC("!\n")); \
+ __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
+} while (0)
+
+
+
+/* Layout of a PCI "reg"/"assigned-addresses" property entry. */
+struct pci_reg_property {
+ struct pci_address addr;
+ u32 size_hi;
+ u32 size_lo;
+};
+
+
+/* Layout of an ISA "reg" property entry. */
+struct isa_reg_property {
+ u32 space;
+ u32 address;
+ u32 size;
+};
+
+/* Layout of a PCI "interrupt-map" property entry. */
+struct pci_intr_map {
+ struct pci_address addr;
+ u32 dunno;
+ phandle int_ctrler;
+ u32 intr;
+};
+
+
+typedef unsigned long interpret_func(struct device_node *, unsigned long,
+ int, int);
+#if 0
+static interpret_func interpret_pci_props;
+#endif
+static unsigned long interpret_pci_props(struct device_node *, unsigned long,
+ int, int);
+
+static interpret_func interpret_isa_props;
+static interpret_func interpret_root_props;
+
+#ifndef FB_MAX /* avoid pulling in all of the fb stuff */
+#define FB_MAX 8
+#endif
+
+
+/* Global Open Firmware state.  Positional initializer: the field order
+ * here must match the struct prom_t declaration exactly (including the
+ * trailing DEBUG_YABOOT-only member). */
+struct prom_t prom = {
+ 0, /* entry */
+ 0, /* chosen */
+ 0, /* cpu */
+ 0, /* stdout */
+ 0, /* disp_node */
+ {0,0,0,{0},NULL}, /* args */
+ 0, /* version */
+ 32, /* encode_phys_size */
+ 0 /* bi_rec pointer */
+#ifdef DEBUG_YABOOT
+ ,NULL /* yaboot */
+#endif
+};
+
+
+/* Display/console paths discovered during device-tree scan. */
+char *prom_display_paths[FB_MAX] __initdata = { 0, };
+unsigned int prom_num_displays = 0;
+char *of_stdout_device = 0;
+
+extern struct rtas_t rtas;
+extern unsigned long klimit;
+extern unsigned long embedded_sysmap_end;
+extern struct Naca *naca;
+extern struct lmb lmb;
+#ifdef CONFIG_MSCHUNKS
+extern struct msChunks msChunks;
+#endif /* CONFIG_MSCHUNKS */
+
+/* Parenthesized: an expression macro must not expand unbracketed into
+ * arbitrary contexts (the old "16 * 3" only worked by precedence). */
+#define MAX_PHB (16 * 3) // 16 Towers * 3 PHBs/tower
+struct _of_tce_table of_tce_table[MAX_PHB + 1] = {{0, 0, 0}};
+
+char *bootpath = 0;
+char *bootdevice = 0;
+
+/* Head of the copied device tree (built by copy_device_tree). */
+struct device_node *allnodes = 0;
+
+#define UNDEFINED_IRQ 0xffff
+unsigned short real_irq_to_virt_map[NR_HW_IRQS];
+unsigned short virt_irq_to_real_map[NR_IRQS];
+int last_virt_irq = 2; /* index of last virt_irq. Skip through IPI */
+
+static unsigned long call_prom(const char *service, int nargs, int nret, ...);
+static void prom_exit(void);
+static unsigned long copy_device_tree(unsigned long);
+static unsigned long inspect_node(phandle, struct device_node *, unsigned long,
+ unsigned long, struct device_node ***);
+static unsigned long finish_node(struct device_node *, unsigned long,
+ interpret_func *, int, int);
+static unsigned long finish_node_interrupts(struct device_node *, unsigned long);
+static unsigned long check_display(unsigned long);
+static int prom_next_node(phandle *);
+static struct bi_record * prom_bi_rec_verify(struct bi_record *);
+static unsigned long prom_bi_rec_reserve(unsigned long);
+static struct device_node *find_phandle(phandle);
+
+#ifdef CONFIG_MSCHUNKS
+static unsigned long prom_initialize_mschunks(unsigned long);
+#endif /* CONFIG_MSCHUNKS */
+
+extern unsigned long reloc_offset(void);
+
+extern void enter_prom(void *dummy,...);
+
+void cacheable_memzero(void *, unsigned int);
+
+extern char cmd_line[512]; /* XXX */
+/* Size of the flattened device tree copied by copy_device_tree(). */
+unsigned long dev_tree_size;
+
+#ifdef CONFIG_HMT
+/* Per-cpu hardware-multithreading bring-up data (PIR + thread id). */
+struct {
+ unsigned int pir;
+ unsigned int threadid;
+} hmt_thread_data[NR_CPUS] = {0};
+#endif /* CONFIG_HMT */
+
+char testString[] = "LINUX\n";
+
+
+/* This is the one and *ONLY* place where we actually call open
+ * firmware from, since we need to make sure we're running in 32b
+ * mode when we do. We switch back to 64b mode upon return.
+ */
+
+/* Marshal a client-interface call into the relocated prom args block
+ * and enter Open Firmware.  Variadic args are truncated to 32 bits
+ * (LONG_LSW) as OF runs in 32-bit mode.  Returns the first return cell,
+ * or 0 when nret == 0.
+ * NOTE: `offset` looks unused, but PTRRELOC()/RELOC() expand to
+ * reference this local -- do not remove it. */
+static unsigned long __init
+call_prom(const char *service, int nargs, int nret, ...)
+{
+ int i;
+ unsigned long offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+ va_list list;
+
+ _prom->args.service = (u32)LONG_LSW(service);
+ _prom->args.nargs = nargs;
+ _prom->args.nret = nret;
+ /* return cells live immediately after the input args */
+ _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
+
+ va_start(list, nret);
+ for (i=0; i < nargs ;i++)
+ _prom->args.args[i] = (prom_arg_t)LONG_LSW(va_arg(list, unsigned long));
+ va_end(list);
+
+ for (i=0; i < nret ;i++)
+ _prom->args.rets[i] = 0;
+
+ enter_prom(&_prom->args);
+
+ return (unsigned long)((nret > 0) ? _prom->args.rets[0] : 0);
+}
+
+
+/* Ask Open Firmware to exit; spin forever if it returns.
+ * `offset` is consumed by the RELOC() macro expansion. */
+static void __init
+prom_exit()
+{
+ unsigned long offset = reloc_offset();
+
+ call_prom(RELOC("exit"), 0, 0);
+
+ for (;;) /* should never get here */
+ ;
+}
+
+/* Drop into the Open Firmware command interpreter ("enter" service).
+ * `offset` is consumed by the RELOC() macro expansion. */
+void __init
+prom_enter(void)
+{
+ unsigned long offset = reloc_offset();
+
+ call_prom(RELOC("enter"), 0, 0);
+}
+
+
+/* Write a NUL-terminated string to the OF stdout handle, translating
+ * each '\n' into "\r\n".  Silently does nothing until prom.stdout has
+ * been resolved.  `offset` is consumed by RELOC()/PTRRELOC(). */
+void __init
+prom_print(const char *msg)
+{
+ const char *p, *q;
+ unsigned long offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+
+ if (_prom->stdout == 0)
+ return;
+
+ for (p = msg; *p != 0; p = q) {
+ /* emit the run of characters up to the next newline */
+ for (q = p; *q != 0 && *q != '\n'; ++q)
+ ;
+ if (q > p)
+ call_prom(RELOC("write"), 3, 1, _prom->stdout,
+ p, q - p);
+ if (*q != 0) {
+ ++q;
+ call_prom(RELOC("write"), 3, 1, _prom->stdout,
+ RELOC("\r\n"), 2);
+ }
+ }
+}
+
+/* Print val on the OF console as a fixed-width (16 digit) lower-case
+ * hexadecimal string, no prefix.  Builds the string in place from the
+ * least significant nibble upward; deliberately uses no static data,
+ * since this may run before relocation. */
+void
+prom_print_hex(unsigned long val)
+{
+ char buf[sizeof(val)*2+1];
+ int pos = sizeof(val)*2;
+
+ buf[pos] = '\0';
+ while (--pos >= 0) {
+ int nibble = val & 0xf;
+ buf[pos] = (nibble < 10) ? ('0' + nibble)
+ : ('a' + nibble - 10);
+ val >>= 4;
+ }
+ prom_print(buf);
+}
+
+/* Print a newline on the OF console.
+ * `offset` is consumed by the RELOC() macro expansion. */
+void
+prom_print_nl(void)
+{
+ unsigned long offset = reloc_offset();
+ prom_print(RELOC("\n"));
+}
+
+
+static unsigned long
+prom_initialize_naca(unsigned long mem)
+{
+ phandle node;
+ char type[64];
+ unsigned long num_cpus = 0;
+ unsigned long offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+ struct Naca *_naca = RELOC(naca);
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("prom_initialize_naca: start...\n"));
+#endif
+
+ _naca->pftSize = 0; /* ilog2 of htab size. computed below. */
+
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
+ type, sizeof(type));
+
+ if (!strcmp(type, RELOC("cpu"))) {
+ num_cpus += 1;
+
+ /* We're assuming *all* of the CPUs have the same
+ * d-cache and i-cache sizes... -Peter
+ */
+ if ( num_cpus == 1 ) {
+ u32 size;
+
+ call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("d-cache-line-size"),
+ &size, sizeof(size));
+
+ _naca->dCacheL1LineSize = size;
+ _naca->dCacheL1LogLineSize = __ilog2(size);
+ _naca->dCacheL1LinesPerPage = PAGE_SIZE / size;
+
+ call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("i-cache-line-size"),
+ &size, sizeof(size));
+
+ _naca->iCacheL1LineSize = size;
+ _naca->iCacheL1LogLineSize = __ilog2(size);
+ _naca->iCacheL1LinesPerPage = PAGE_SIZE / size;
+
+ if (RELOC(_machine) == _MACH_pSeriesLP) {
+ u32 pft_size[2];
+ call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("ibm,pft-size"),
+ &pft_size, sizeof(pft_size));
+ /* pft_size[0] is the NUMA CEC cookie */
+ _naca->pftSize = pft_size[1];
+ }
+ }
+ } else if (!strcmp(type, RELOC("serial"))) {
+ phandle isa, pci;
+ struct isa_reg_property reg;
+ union pci_range ranges;
+
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("ibm,aix-loc"), type, sizeof(type));
+
+ if (strcmp(type, RELOC("S1")))
+ continue;
+
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("reg"),
+ ®, sizeof(reg));
+
+ isa = call_prom(RELOC("parent"), 1, 1, node);
+ if (!isa)
+ PROM_BUG();
+ pci = call_prom(RELOC("parent"), 1, 1, isa);
+ if (!pci)
+ PROM_BUG();
+
+ call_prom(RELOC("getprop"), 4, 1, pci, RELOC("ranges"),
+ &ranges, sizeof(ranges));
+
+ if ( _prom->encode_phys_size == 32 )
+ _naca->serialPortAddr = ranges.pci32.phys+reg.address;
+ else {
+ _naca->serialPortAddr =
+ ((((unsigned long)ranges.pci64.phys_hi) << 32) |
+ (ranges.pci64.phys_lo)) + reg.address;
+ }
+ }
+ }
+
+ _naca->interrupt_controller = IC_INVALID;
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("name"),
+ type, sizeof(type));
+ if (strcmp(type, RELOC("interrupt-controller"))) {
+ continue;
+ }
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("compatible"),
+ type, sizeof(type));
+ if (strstr(type, RELOC("open-pic"))) {
+ _naca->interrupt_controller = IC_OPEN_PIC;
+ } else if (strstr(type, RELOC("ppc-xicp"))) {
+ _naca->interrupt_controller = IC_PPC_XIC;
+ } else {
+ prom_print(RELOC("prom: failed to recognize interrupt-controller\n"));
+ }
+ break;
+ }
+
+ if (_naca->interrupt_controller == IC_INVALID) {
+ prom_print(RELOC("prom: failed to find interrupt-controller\n"));
+ PROM_BUG();
+ }
+
+ /* We gotta have at least 1 cpu... */
+ if ( (_naca->processorCount = num_cpus) < 1 )
+ PROM_BUG();
+
+ _naca->physicalMemorySize = lmb_phys_mem_size();
+
+ if (RELOC(_machine) == _MACH_pSeries) {
+ unsigned long rnd_mem_size, pteg_count;
+
+ /* round mem_size up to next power of 2 */
+ rnd_mem_size = 1UL << __ilog2(_naca->physicalMemorySize);
+ if (rnd_mem_size < _naca->physicalMemorySize)
+ rnd_mem_size <<= 1;
+
+ /* # pages / 2 */
+ pteg_count = (rnd_mem_size >> (12 + 1));
+
+ _naca->pftSize = __ilog2(pteg_count << 7);
+ }
+
+ if (_naca->pftSize == 0) {
+ prom_print(RELOC("prom: failed to compute pftSize!\n"));
+ PROM_BUG();
+ }
+
+ /*
+ * Hardcode to GP size. I am not sure where to get this info
+ * in general, as there does not appear to be a slb-size OF
+ * entry. At least in Condor and earlier. DRENG
+ */
+ _naca->slb_size = 64;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("naca->processorCount = 0x"));
+ prom_print_hex(_naca->processorCount);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->physicalMemorySize = 0x"));
+ prom_print_hex(_naca->physicalMemorySize);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->pftSize = 0x"));
+ prom_print_hex(_naca->pftSize);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->dCacheL1LineSize = 0x"));
+ prom_print_hex(_naca->dCacheL1LineSize);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->dCacheL1LogLineSize = 0x"));
+ prom_print_hex(_naca->dCacheL1LogLineSize);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->dCacheL1LinesPerPage = 0x"));
+ prom_print_hex(_naca->dCacheL1LinesPerPage);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->iCacheL1LineSize = 0x"));
+ prom_print_hex(_naca->iCacheL1LineSize);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->iCacheL1LogLineSize = 0x"));
+ prom_print_hex(_naca->iCacheL1LogLineSize);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->iCacheL1LinesPerPage = 0x"));
+ prom_print_hex(_naca->iCacheL1LinesPerPage);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->serialPortAddr = 0x"));
+ prom_print_hex(_naca->serialPortAddr);
+ prom_print_nl();
+
+ prom_print(RELOC("naca->interrupt_controller = 0x"));
+ prom_print_hex(_naca->interrupt_controller);
+ prom_print_nl();
+
+ prom_print(RELOC("_machine = 0x"));
+ prom_print_hex(RELOC(_machine));
+ prom_print_nl();
+
+ prom_print(RELOC("prom_initialize_naca: end...\n"));
+#endif
+
+ return mem;
+}
+
+
+/*
+ * prom_initialize_lmb: walk the OF device tree, find every node of
+ * device_type "memory" and register each reported (address,size)
+ * region with the LMB allocator.  Returns the early-allocation
+ * pointer `mem' (advanced by prom_initialize_mschunks when
+ * CONFIG_MSCHUNKS is set).  Runs before relocation, so globals are
+ * reached via RELOC/PTRRELOC and firmware via call_prom().
+ */
+static unsigned long __init
+prom_initialize_lmb(unsigned long mem)
+{
+ phandle node;
+ char type[64];
+ unsigned long i, offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+ union lmb_reg_property reg;
+ /* mem_size must start at zero: it accumulates the size of every
+ * LMB accepted below (it was previously used uninitialized).
+ */
+ unsigned long mem_size = 0, lmb_base, lmb_size;
+ /* Each "reg" entry is an (address,size) pair whose cell width
+ * depends on the firmware's encoding (32- or 64-bit physical).
+ */
+ unsigned long num_regs, bytes_per_reg = (_prom->encode_phys_size*2)/8;
+
+#ifdef CONFIG_MSCHUNKS
+#if 1
+ /* Fix me: 630 3G-4G IO hack here... -Peter (PPPBBB) */
+ unsigned long io_base = 3UL<<30;
+ unsigned long io_size = 1UL<<30;
+ unsigned long have_630 = 1; /* assume we have a 630 */
+
+#else
+ unsigned long io_base = <real io base here>;
+ unsigned long io_size = <real io size here>;
+#endif
+#endif
+
+ lmb_init();
+
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
+ type, sizeof(type));
+
+ if (strcmp(type, RELOC("memory")))
+ continue;
+
+ num_regs = call_prom(RELOC("getprop"), 4, 1, node, RELOC("reg"),
+ &reg, sizeof(reg)) / bytes_per_reg;
+
+ for (i=0; i < num_regs ;i++) {
+ if (_prom->encode_phys_size == 32) {
+ lmb_base = reg.addr32[i].address;
+ lmb_size = reg.addr32[i].size;
+ } else {
+ lmb_base = reg.addr64[i].address;
+ lmb_size = reg.addr64[i].size;
+ }
+
+#ifdef CONFIG_MSCHUNKS
+ if ( lmb_addrs_overlap(lmb_base,lmb_size,
+ io_base,io_size) ) {
+ /* If we really have dram here, then we don't
+ * have a 630! -Peter
+ */
+ have_630 = 0;
+ }
+#endif
+ if ( lmb_add(lmb_base, lmb_size) < 0 )
+ prom_print(RELOC("Too many LMB's, discarding this one...\n"));
+ else
+ mem_size += lmb_size; /* was "=+": only kept the last region */
+ }
+
+ }
+
+#ifdef CONFIG_MSCHUNKS
+ if ( have_630 && lmb_addrs_overlap(0,mem_size,io_base,io_size) )
+ lmb_add_io(io_base, io_size);
+#endif
+
+ lmb_analyze();
+
+#ifdef CONFIG_MSCHUNKS
+ mem = prom_initialize_mschunks(mem);
+#endif /* CONFIG_MSCHUNKS */
+
+ return mem;
+}
+
+
+/*
+ * prom_instantiate_rtas: locate /rtas and, if present, ask Open
+ * Firmware to instantiate RTAS in memory carved from the early
+ * allocation pointer `mem'.  Fills in the global rtas structure
+ * (base, size, entry) and returns the advanced `mem'.  As a side
+ * effect, the presence of the ibm,hypertas-functions property
+ * switches _machine to _MACH_pSeriesLP (LPAR firmware).
+ */
+static unsigned long __init
+prom_instantiate_rtas(unsigned long mem)
+{
+ unsigned long offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+ struct rtas_t *_rtas = PTRRELOC(&rtas);
+ ihandle prom_rtas;
+ u32 getprop_rval;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("prom_instantiate_rtas: start...\n"));
+#endif
+ prom_rtas = (ihandle)call_prom(RELOC("finddevice"), 1, 1, RELOC("/rtas"));
+ if (prom_rtas != (ihandle) -1) {
+ char hypertas_funcs[1024];
+ int rc;
+
+ /* The hypervisor call list only exists under LPAR firmware. */
+ if ((rc = call_prom(RELOC("getprop"),
+ 4, 1, prom_rtas,
+ RELOC("ibm,hypertas-functions"),
+ hypertas_funcs,
+ sizeof(hypertas_funcs))) > 0) {
+ RELOC(_machine) = _MACH_pSeriesLP;
+ }
+
+ call_prom(RELOC("getprop"),
+ 4, 1, prom_rtas,
+ RELOC("rtas-size"),
+ &getprop_rval,
+ sizeof(getprop_rval));
+ _rtas->size = getprop_rval;
+ prom_print(RELOC("instantiating rtas"));
+ if (_rtas->size != 0) {
+ /*
+ * Ask OF for some space for RTAS.
+ * Actually OF has bugs so we just arbitrarily
+ * use memory at the 6MB point.
+ */
+ // The new code...
+ mem = PAGE_ALIGN(mem);
+ _rtas->base = mem + offset - KERNELBASE;
+
+ mem += _rtas->size;
+ prom_print(RELOC(" at 0x"));
+ prom_print_hex(_rtas->base);
+
+ prom_rtas = (ihandle)call_prom(RELOC("open"),
+ 1, 1, RELOC("/rtas"));
+ prom_print(RELOC("..."));
+
+ /* On success the RTAS entry point is returned in rets[1]. */
+ if ((long)call_prom(RELOC("call-method"), 3, 2,
+ RELOC("instantiate-rtas"),
+ prom_rtas,
+ _rtas->base) >= 0) {
+ _rtas->entry = (long)_prom->args.rets[1];
+ }
+ }
+
+ if (_rtas->entry <= 0) {
+ prom_print(RELOC(" failed\n"));
+ } else {
+ prom_print(RELOC(" done\n"));
+ }
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("rtas->base = 0x"));
+ prom_print_hex(_rtas->base);
+ prom_print_nl();
+ prom_print(RELOC("rtas->entry = 0x"));
+ prom_print_hex(_rtas->entry);
+ prom_print_nl();
+ prom_print(RELOC("rtas->size = 0x"));
+ prom_print_hex(_rtas->size);
+ prom_print_nl();
+#endif
+ }
+#ifdef DEBUG_PROM
+ prom_print(RELOC("prom_instantiate_rtas: end...\n"));
+#endif
+
+ return mem;
+}
+
+/*
+ * prom_strtoul: minimal decimal string-to-unsigned-long for the early
+ * boot environment (no C library available yet).  Parses the leading
+ * run of decimal digits in `cp' and returns their value; parsing now
+ * stops at the first non-digit character (the previous version kept
+ * consuming every byte and produced garbage for any string with a
+ * trailing non-digit).  No overflow detection, as before.
+ */
+unsigned long prom_strtoul(const char *cp)
+{
+ unsigned long result = 0;
+
+ while (*cp >= '0' && *cp <= '9')
+ result = result*10 + (*cp++ - '0');
+
+ return result;
+}
+
+
+#ifdef CONFIG_MSCHUNKS
+/*
+ * prom_initialize_mschunks: build the msChunks absolute->physical
+ * chunk map from the LMB regions gathered earlier.  Allocates the map
+ * out of `mem' (via msChunks_alloc) and returns the advanced pointer.
+ * Also adds and reserves an I/O hole region when it does not overlap
+ * real memory.
+ */
+static unsigned long
+prom_initialize_mschunks(unsigned long mem)
+{
+ unsigned long offset = reloc_offset();
+ struct lmb *_lmb = PTRRELOC(&lmb);
+ struct msChunks *_msChunks = PTRRELOC(&msChunks);
+ unsigned long i, pchunk = 0;
+ unsigned long mem_size = _lmb->memory.size;
+ unsigned long chunk_size = _lmb->memory.lcd_size;
+
+#if 1
+ /* Fix me: 630 3G-4G IO hack here... -Peter (PPPBBB) */
+ unsigned long io_base = 3UL<<30;
+ unsigned long io_size = 1UL<<30;
+
+ for (i=0; i < _lmb->memory.cnt ;i++) {
+ unsigned long base = _lmb->memory.region[i].base;
+ unsigned long size = _lmb->memory.region[i].size;
+ if ( lmb_addrs_overlap(base,size,io_base,io_size) ) {
+ /* If we really have dram here, then we don't
+ * have a 630! -Peter
+ */
+ /* Degenerate the hole to a 1-byte range past the end
+ * of memory so the overlap test below fails.
+ */
+ io_base = mem_size;
+ io_size = 1;
+ break;
+ }
+ }
+#else
+ unsigned long io_base = <real io base here>;
+ unsigned long io_size = <real io size here>;
+#endif
+
+ if ( lmb_addrs_overlap(0,mem_size,io_base,io_size) ) {
+ lmb_add(io_base, io_size);
+ lmb_reserve(io_base, io_size);
+ }
+
+ mem = msChunks_alloc(mem, mem_size / chunk_size, chunk_size);
+
+ /* Map each chunk-sized piece of every LMB region onto the next
+ * consecutive physical chunk number.
+ */
+ for (i=0; i < _lmb->memory.cnt ;i++) {
+ unsigned long base = _lmb->memory.region[i].base;
+ unsigned long size = _lmb->memory.region[i].size;
+ unsigned long achunk = addr_to_chunk(base);
+ unsigned long end_achunk = addr_to_chunk(base+size);
+ _lmb->memory.region[i].physbase = chunk_to_addr(pchunk);
+ for (; achunk < end_achunk ;) {
+ PTRRELOC(_msChunks->abs)[pchunk++] = achunk++;
+ }
+ }
+
+ return mem;
+}
+#endif /* CONFIG_MSCHUNKS */
+
+/*
+ * prom_initialize_tce_table: locate every PHB (a python/Speedwagon
+ * compatible bridge of device_type "pci"), carve a 4MB TCE table for
+ * it out of the LMB allocator, fill the table with a one-to-one
+ * mapping covering 2GB of PCI DMA space, and hand it to firmware via
+ * the set-64-bit-addressing call-method.  Each table is recorded in
+ * the global of_tce_table[] array, terminated by a zero node.
+ */
+void
+prom_initialize_tce_table(void)
+{
+ phandle node;
+ ihandle phb_node;
+ unsigned long offset = reloc_offset();
+ /* path must hold the 255 bytes package-to-path is allowed to
+ * write below; the old 64-byte buffer could be overrun by a
+ * long device path.
+ */
+ char compatible[64], path[256], type[64];
+ unsigned long i, table = 0;
+ unsigned long base, vbase, align;
+ unsigned int minalign, minsize;
+ struct _of_tce_table *prom_tce_table = RELOC(of_tce_table);
+ unsigned long tce_entry, *tce_entryp;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("starting prom_initialize_tce_table\n"));
+#endif
+
+ /* Search all nodes looking for PHBs. */
+ for (node = 0; prom_next_node(&node); ) {
+ compatible[0] = 0;
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("compatible"),
+ compatible, sizeof(compatible));
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
+ type, sizeof(type));
+
+ if ((compatible[0] == 0) ||
+ ((strstr(compatible, RELOC("python")) == NULL) &&
+ (strstr(compatible, RELOC("Speedwagon")) == NULL))) {
+ continue;
+ }
+ if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL)) {
+ continue;
+ }
+
+ if (call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("tce-table-minalign"), &minalign,
+ sizeof(minalign)) < 0) {
+ minalign = 0;
+ }
+
+ if (call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("tce-table-minsize"), &minsize,
+ sizeof(minsize)) < 0) {
+ minsize = 4UL << 20;
+ }
+
+ /* Even though we read what OF wants, we just set the table
+ * size to 4 MB. This is enough to map 2GB of PCI DMA space.
+ * By doing this, we avoid the pitfalls of trying to DMA to
+ * MMIO space and the DMA alias hole.
+ */
+ minsize = 4UL << 20;
+
+ /* Align to the greater of the align or size */
+ align = (minalign < minsize) ? minsize : minalign;
+
+ /* Carve out storage for the TCE table. */
+ base = lmb_alloc(minsize, align);
+
+ if ( !base ) {
+ prom_print(RELOC("ERROR, cannot find space for TCE table.\n"));
+ prom_exit();
+ }
+
+ vbase = absolute_to_virt(base);
+
+ /* Save away the TCE table attributes for later use. */
+ prom_tce_table[table].node = node;
+ prom_tce_table[table].base = vbase;
+ prom_tce_table[table].size = minsize;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("TCE table: 0x"));
+ prom_print_hex(table);
+ prom_print_nl();
+
+ prom_print(RELOC("\tnode = 0x"));
+ prom_print_hex(node);
+ prom_print_nl();
+
+ prom_print(RELOC("\tbase = 0x"));
+ prom_print_hex(vbase);
+ prom_print_nl();
+
+ prom_print(RELOC("\tsize = 0x"));
+ prom_print_hex(minsize);
+ prom_print_nl();
+#endif
+
+ /* Initialize the table to have a one-to-one mapping
+ * over the allocated size.
+ */
+ tce_entryp = (unsigned long *)base;
+ for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
+ tce_entry = (i << PAGE_SHIFT);
+ tce_entry |= 0x3;
+ *tce_entryp = tce_entry;
+ }
+
+ /* Call OF to setup the TCE hardware */
+ if (call_prom(RELOC("package-to-path"), 3, 1, node,
+ path, 255) <= 0) {
+ prom_print(RELOC("package-to-path failed\n"));
+ } else {
+ prom_print(RELOC("opened "));
+ prom_print(path);
+ prom_print_nl();
+ }
+
+ /* NOTE(review): on open failure we still issue the call-method
+ * below with the bad handle, as the original code did.
+ */
+ phb_node = (ihandle)call_prom(RELOC("open"), 1, 1, path);
+ if ( (long)phb_node <= 0) {
+ prom_print(RELOC("open failed\n"));
+ } else {
+ prom_print(RELOC("open success\n"));
+ }
+ call_prom(RELOC("call-method"), 6, 0,
+ RELOC("set-64-bit-addressing"),
+ phb_node,
+ -1,
+ minsize,
+ base & 0xffffffff,
+ (base >> 32) & 0xffffffff);
+ call_prom(RELOC("close"), 1, 0, phb_node);
+
+ table++;
+ }
+
+ /* Flag the first invalid entry */
+ prom_tce_table[table].node = 0;
+#ifdef DEBUG_PROM
+ prom_print(RELOC("ending prom_initialize_tce_table\n"));
+#endif
+}
+
+/*
+ * With CHRP SMP we need to use the OF to start the other
+ * processors so we can't wait until smp_boot_cpus (the OF is
+ * trashed by then) so we have to put the processors into
+ * a holding pattern controlled by the kernel (not OF) before
+ * we destroy the OF.
+ *
+ * This uses a chunk of low memory, puts some holding pattern
+ * code there and sends the other processors off to there until
+ * smp_boot_cpus tells them to do something. The holding pattern
+ * checks that address until its cpu # is there, when it is that
+ * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
+ * of setting those values.
+ *
+ * We also use physical address 0x4 here to tell when a cpu
+ * is in its holding pattern code.
+ *
+ * Fixup comment... DRENG / PPPBBB - Peter
+ *
+ * -- Cort
+ */
+/*
+ * prom_hold_cpus: park every secondary cpu in the kernel's own
+ * holding pattern (__secondary_hold) before Open Firmware is
+ * destroyed by moving the kernel.  `mem' is scratch space for device
+ * path strings.  Bumps naca->processorCount for each cpu that checks
+ * in, and optionally starts HMT secondary threads afterwards.
+ */
+static void
+prom_hold_cpus(unsigned long mem)
+{
+ unsigned long i;
+ unsigned int reg;
+ phandle node;
+ unsigned long offset = reloc_offset();
+ char type[64], *path;
+ int cpuid = 0;
+ extern void __secondary_hold(void);
+ extern unsigned long __secondary_hold_spinloop;
+ extern unsigned long __secondary_hold_acknowledge;
+ unsigned long *spinloop = __v2a(&__secondary_hold_spinloop);
+ unsigned long *acknowledge = __v2a(&__secondary_hold_acknowledge);
+ unsigned long secondary_hold = (unsigned long)__v2a(*PTRRELOC((unsigned long *)__secondary_hold));
+ struct Naca *_naca = RELOC(naca);
+ struct Paca *_xPaca = PTRRELOC(&xPaca[0]);
+ struct prom_t *_prom = PTRRELOC(&prom);
+
+ /* Initially, we must have one active CPU. */
+ _naca->processorCount = 1;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("prom_hold_cpus: start...\n"));
+ prom_print(RELOC("    1) spinloop       = 0x"));
+ prom_print_hex(spinloop);
+ prom_print_nl();
+ prom_print(RELOC("    1) *spinloop      = 0x"));
+ prom_print_hex(*spinloop);
+ prom_print_nl();
+ prom_print(RELOC("    1) acknowledge    = 0x"));
+ prom_print_hex(acknowledge);
+ prom_print_nl();
+ prom_print(RELOC("    1) *acknowledge   = 0x"));
+ prom_print_hex(*acknowledge);
+ prom_print_nl();
+ prom_print(RELOC("    1) secondary_hold = 0x"));
+ prom_print_hex(secondary_hold);
+ prom_print_nl();
+#endif
+
+ /* Set the common spinloop variable, so all of the secondary cpus
+ * will block when they are awakened from their OF spinloop.
+ * This must occur for both SMP and non SMP kernels, since OF will
+ * be trashed when we move the kernel.
+ */
+ *spinloop = 0;
+
+#ifdef CONFIG_HMT
+ for (i=0; i < NR_CPUS; i++) {
+ RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
+ }
+#endif
+ /* look for cpus */
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
+ type, sizeof(type));
+ if (strcmp(type, RELOC("cpu")) != 0)
+ continue;
+
+ /* Skip non-configured cpus. */
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("status"),
+ type, sizeof(type));
+ if (strcmp(type, RELOC("okay")) != 0)
+ continue;
+
+ reg = -1;
+ /* (fixed: "&reg" had been mangled into a stray (R) glyph) */
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("reg"),
+ &reg, sizeof(reg));
+
+ /* Only need to start secondary procs, not ourself. */
+ if ( reg == _prom->cpu )
+ continue;
+
+ path = (char *) mem;
+ memset(path, 0, 256);
+ if ((long) call_prom(RELOC("package-to-path"), 3, 1,
+ node, path, 255) < 0)
+ continue;
+
+ cpuid++;
+
+#ifdef DEBUG_PROM
+ prom_print_nl();
+ prom_print(RELOC("cpuid = 0x"));
+ prom_print_hex(cpuid);
+ prom_print_nl();
+ prom_print(RELOC("cpu hw idx   = 0x"));
+ prom_print_hex(reg);
+ prom_print_nl();
+#endif
+ _xPaca[cpuid].xHwProcNum = reg;
+
+ prom_print(RELOC("starting cpu "));
+ prom_print(path);
+
+ /* Init the acknowledge var which will be reset by
+ * the secondary cpu when it awakens from its OF
+ * spinloop.
+ */
+ *acknowledge = (unsigned long)-1;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("    3) spinloop       = 0x"));
+ prom_print_hex(spinloop);
+ prom_print_nl();
+ prom_print(RELOC("    3) *spinloop      = 0x"));
+ prom_print_hex(*spinloop);
+ prom_print_nl();
+ prom_print(RELOC("    3) acknowledge    = 0x"));
+ prom_print_hex(acknowledge);
+ prom_print_nl();
+ prom_print(RELOC("    3) *acknowledge   = 0x"));
+ prom_print_hex(*acknowledge);
+ prom_print_nl();
+ prom_print(RELOC("    3) secondary_hold = 0x"));
+ prom_print_hex(secondary_hold);
+ prom_print_nl();
+ prom_print(RELOC("    3) cpuid          = 0x"));
+ prom_print_hex(cpuid);
+ prom_print_nl();
+#endif
+ call_prom(RELOC("start-cpu"), 3, 0, node, secondary_hold, cpuid);
+ prom_print(RELOC("..."));
+ /* Busy-wait for the secondary to write its cpuid into the
+ * acknowledge word (bounded, in case it never shows up).
+ */
+ for ( i = 0 ; (i < 100000000) &&
+ (*acknowledge == ((unsigned long)-1)); i++ ) ;
+#ifdef DEBUG_PROM
+ {
+ unsigned long *p = 0x0;
+ prom_print(RELOC("    4) 0x0 = 0x"));
+ prom_print_hex(*p);
+ prom_print_nl();
+ }
+#endif
+ if (*acknowledge == cpuid) {
+ prom_print(RELOC("ok\n"));
+ /* Set the number of active processors. */
+ _naca->processorCount++;
+ } else {
+ prom_print(RELOC("failed: "));
+ prom_print_hex(*acknowledge);
+ prom_print_nl();
+ }
+ }
+#ifdef CONFIG_HMT
+ /* Only enable HMT on processors that provide support. */
+ if (__is_processor(PV_PULSAR) ||
+ __is_processor(PV_ICESTAR) ||
+ __is_processor(PV_SSTAR)) {
+ prom_print(RELOC("    starting secondary threads\n"));
+
+ for (i=0; i < _naca->processorCount ;i++) {
+ unsigned long threadid = _naca->processorCount*2-1-i;
+
+ if (i == 0) {
+ unsigned long pir = _get_PIR();
+ if (__is_processor(PV_PULSAR)) {
+ RELOC(hmt_thread_data)[i].pir =
+ pir & 0x1f;
+ } else {
+ RELOC(hmt_thread_data)[i].pir =
+ pir & 0x3ff;
+ }
+ }
+
+ RELOC(hmt_thread_data)[i].threadid = threadid;
+#ifdef DEBUG_PROM
+ prom_print(RELOC("        cpuid 0x"));
+ prom_print_hex(i);
+ prom_print(RELOC(" maps to threadid 0x"));
+ prom_print_hex(threadid);
+ prom_print_nl();
+ prom_print(RELOC(" pir 0x"));
+ prom_print_hex(RELOC(hmt_thread_data)[i].pir);
+ prom_print_nl();
+#endif
+ _xPaca[threadid].xHwProcNum = _xPaca[i].xHwProcNum+1;
+ }
+ _naca->processorCount *= 2;
+ } else {
+ prom_print(RELOC("Processor is not HMT capable\n"));
+ }
+#endif
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("prom_hold_cpus: end...\n"));
+#endif
+}
+
+
+/*
+ * We enter here early on, when the Open Firmware prom is still
+ * handling exceptions and the MMU hash table for us.
+ */
+
+/*
+ * prom_init: main early-boot entry, called while Open Firmware still
+ * owns exceptions and the MMU hash table.  Gathers everything the
+ * kernel needs from OF (stdout, cpu, memory, RTAS, device tree),
+ * parks secondary cpus, quiesces OF when possible, and returns the
+ * physical address the kernel should be copied/run from.
+ */
+unsigned long __init
+prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
+ unsigned long r6, unsigned long r7, yaboot_debug_t *yaboot)
+{
+ int chrp = 0;
+ unsigned long mem;
+ ihandle prom_mmu, prom_op, prom_root, prom_cpu;
+ phandle cpu_pkg;
+ unsigned long offset = reloc_offset();
+ long l;
+ char *p, *d;
+ unsigned long phys;
+ u32 getprop_rval;
+ struct Naca *_naca = RELOC(naca);
+ struct Paca *_xPaca = PTRRELOC(&xPaca[0]);
+ struct prom_t *_prom = PTRRELOC(&prom);
+
+ /* Default machine type. */
+ RELOC(_machine) = _MACH_pSeries;
+ /* Reset klimit to take into account the embedded system map */
+ if (RELOC(embedded_sysmap_end))
+ RELOC(klimit) = __va(PAGE_ALIGN(RELOC(embedded_sysmap_end)));
+
+ /* Get a handle to the prom entry point before anything else */
+ _prom->entry = pp;
+ _prom->bi_recs = prom_bi_rec_verify((struct bi_record *)r6);
+ if ( _prom->bi_recs != NULL ) {
+ RELOC(klimit) = PTRUNRELOC((unsigned long)_prom->bi_recs + _prom->bi_recs->data[1]);
+ }
+
+#ifdef DEBUG_YABOOT
+ call_yaboot(yaboot->dummy,offset>>32,offset&0xffffffff);
+ call_yaboot(yaboot->printf, RELOC("offset = 0x%08x%08x\n"), LONG_MSW(offset), LONG_LSW(offset));
+#endif
+
+ /* Default */
+ phys = KERNELBASE - offset;
+
+#ifdef DEBUG_YABOOT
+ call_yaboot(yaboot->printf, RELOC("phys = 0x%08x%08x\n"), LONG_MSW(phys), LONG_LSW(phys));
+#endif
+
+
+#ifdef DEBUG_YABOOT
+ _prom->yaboot = yaboot;
+ call_yaboot(yaboot->printf, RELOC("pp = 0x%08x%08x\n"), LONG_MSW(pp), LONG_LSW(pp));
+ call_yaboot(yaboot->printf, RELOC("prom = 0x%08x%08x\n"), LONG_MSW(_prom->entry), LONG_LSW(_prom->entry));
+#endif
+
+ /* First get a handle for the stdout device */
+ _prom->chosen = (ihandle)call_prom(RELOC("finddevice"), 1, 1,
+ RELOC("/chosen"));
+
+#ifdef DEBUG_YABOOT
+ call_yaboot(yaboot->printf, RELOC("prom->chosen = 0x%08x%08x\n"), LONG_MSW(_prom->chosen), LONG_LSW(_prom->chosen));
+#endif
+
+ if ((long)_prom->chosen <= 0)
+ prom_exit();
+
+ if ((long)call_prom(RELOC("getprop"), 4, 1, _prom->chosen,
+ RELOC("stdout"), &getprop_rval,
+ sizeof(getprop_rval)) <= 0)
+ prom_exit();
+
+ _prom->stdout = (ihandle)(unsigned long)getprop_rval;
+
+#ifdef DEBUG_YABOOT
+ if (_prom->stdout == 0) {
+ call_yaboot(yaboot->printf, RELOC("prom->stdout = 0x%08x%08x\n"), LONG_MSW(_prom->stdout), LONG_LSW(_prom->stdout));
+ }
+
+ call_yaboot(yaboot->printf, RELOC("prom->stdout = 0x%08x%08x\n"), LONG_MSW(_prom->stdout), LONG_LSW(_prom->stdout));
+#endif
+
+#ifdef DEBUG_YABOOT
+ call_yaboot(yaboot->printf, RELOC("Location: 0x11\n"));
+#endif
+
+ /* Early allocations are carved from memory just past klimit. */
+ mem = RELOC(klimit) - offset;
+#ifdef DEBUG_YABOOT
+ call_yaboot(yaboot->printf, RELOC("Location: 0x11b\n"));
+#endif
+
+ /* Get the full OF pathname of the stdout device */
+ p = (char *) mem;
+ memset(p, 0, 256);
+ call_prom(RELOC("instance-to-path"), 3, 1, _prom->stdout, p, 255);
+ RELOC(of_stdout_device) = PTRUNRELOC(p);
+ mem += strlen(p) + 1;
+
+ /* #size-cells of the root node tells us whether physical
+ * addresses are encoded as one or two cells.
+ */
+ getprop_rval = 1;
+ prom_root = (ihandle)call_prom(RELOC("finddevice"), 1, 1, RELOC("/"));
+ if (prom_root != (ihandle)-1) {
+ call_prom(RELOC("getprop"), 4, 1,
+ prom_root, RELOC("#size-cells"),
+ &getprop_rval, sizeof(getprop_rval));
+ }
+ _prom->encode_phys_size = (getprop_rval==1) ? 32 : 64;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("DRENG:  Detect OF version...\n"));
+#endif
+ /* Find the OF version */
+ prom_op = (ihandle)call_prom(RELOC("finddevice"), 1, 1, RELOC("/openprom"));
+ if (prom_op != (ihandle)-1) {
+ char model[64];
+ long sz;
+ sz = (long)call_prom(RELOC("getprop"), 4, 1, prom_op,
+ RELOC("model"), model, 64);
+ if (sz > 0) {
+ char *c;
+ /* hack to skip the ibm chrp firmware # */
+ if ( strncmp(model,RELOC("IBM"),3) ) {
+ for (c = model; *c; c++)
+ if (*c >= '0' && *c <= '9') {
+ _prom->version = *c - '0';
+ break;
+ }
+ }
+ else
+ chrp = 1;
+ }
+ }
+ if (_prom->version >= 3)
+ prom_print(RELOC("OF Version 3 detected.\n"));
+
+
+ /* Determine which cpu is actually running right _now_ */
+ if ((long)call_prom(RELOC("getprop"), 4, 1, _prom->chosen,
+ RELOC("cpu"), &getprop_rval,
+ sizeof(getprop_rval)) <= 0)
+ prom_exit();
+
+ prom_cpu = (ihandle)(unsigned long)getprop_rval;
+ cpu_pkg = call_prom(RELOC("instance-to-package"), 1, 1, prom_cpu);
+ call_prom(RELOC("getprop"), 4, 1,
+ cpu_pkg, RELOC("reg"),
+ &getprop_rval, sizeof(getprop_rval));
+ _prom->cpu = (int)(unsigned long)getprop_rval;
+ _xPaca[0].xHwProcNum = _prom->cpu;
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("Booting CPU hw index = 0x"));
+ prom_print_hex(_prom->cpu);
+ prom_print_nl();
+#endif
+
+ /* Get the boot device and translate it to a full OF pathname. */
+ /* NOTE(review): the 1<<20 length passed to getprop/canon is far
+ * larger than any bound on the buffer at `mem' -- confirm the
+ * firmware clamps to the actual property length.
+ */
+ p = (char *) mem;
+ l = (long) call_prom(RELOC("getprop"), 4, 1, _prom->chosen,
+ RELOC("bootpath"), p, 1<<20);
+ if (l > 0) {
+ p[l] = 0; /* should already be null-terminated */
+ RELOC(bootpath) = PTRUNRELOC(p);
+ mem += l + 1;
+ d = (char *) mem;
+ *d = 0;
+ call_prom(RELOC("canon"), 3, 1, p, d, 1<<20);
+ RELOC(bootdevice) = PTRUNRELOC(d);
+ mem = DOUBLEWORD_ALIGN(mem + strlen(d) + 1);
+ }
+
+ mem = prom_initialize_lmb(mem);
+
+ mem = prom_bi_rec_reserve(mem);
+
+ mem = prom_instantiate_rtas(mem);
+
+ /* Initialize some system info into the Naca early... */
+ mem = prom_initialize_naca(mem);
+
+ /* If we are on an SMP machine, then we *MUST* do the
+ * following, regardless of whether we have an SMP
+ * kernel or not.
+ */
+ if ( _naca->processorCount > 1 )
+ prom_hold_cpus(mem);
+
+ mem = check_display(mem);
+
+#ifdef DEBUG_PROM
+ prom_print(RELOC("copying OF device tree...\n"));
+#endif
+ mem = copy_device_tree(mem);
+
+ RELOC(klimit) = mem + offset;
+
+ lmb_reserve(0, __pa(RELOC(klimit)));
+
+ if (RELOC(_machine) == _MACH_pSeries)
+ prom_initialize_tce_table();
+
+ if ((long) call_prom(RELOC("getprop"), 4, 1,
+ _prom->chosen,
+ RELOC("mmu"),
+ &getprop_rval,
+ sizeof(getprop_rval)) <= 0) {
+ prom_print(RELOC(" no MMU found\n"));
+ prom_exit();
+ }
+
+ /* We assume the phys. address size is 3 cells */
+ /* NOTE(review): prom_mmu is the local declared above; wrapping it
+ * in RELOC() looks suspicious -- confirm against the RELOC macro,
+ * which elsewhere in this file is applied to file-scope globals.
+ */
+ RELOC(prom_mmu) = (ihandle)(unsigned long)getprop_rval;
+
+ if ((long)call_prom(RELOC("call-method"), 4, 4,
+ RELOC("translate"),
+ prom_mmu,
+ (void *)(KERNELBASE - offset),
+ (void *)1) != 0) {
+ prom_print(RELOC(" (translate failed) "));
+ } else {
+ prom_print(RELOC(" (translate ok) "));
+ phys = (unsigned long)_prom->args.rets[3];
+ }
+
+ /* If OpenFirmware version >= 3, then use quiesce call */
+ if (_prom->version >= 3) {
+ prom_print(RELOC("Calling quiesce ...\n"));
+ call_prom(RELOC("quiesce"), 0, 0);
+ phys = KERNELBASE - offset;
+ }
+
+ prom_print(RELOC("returning from prom_init\n"));
+ return phys;
+}
+
+
+/*
+ * prom_set_color: program palette entry `i' of display instance `ih'
+ * through the OF "color!" call-method.  Note the method takes the
+ * components in blue, green, red order.  Returns the firmware's
+ * result value.
+ */
+static int
+prom_set_color(ihandle ih, int i, int r, int g, int b)
+{
+ unsigned long offset = reloc_offset();
+ long rc;
+
+ rc = (long)call_prom(RELOC("call-method"), 6, 1, RELOC("color!"), ih,
+ (void *)(long)i, (void *)(long)b,
+ (void *)(long)g, (void *)(long)r);
+
+ return (int)rc;
+}
+
+/*
+ * If we have a display that we don't know how to drive,
+ * we will want to try to execute OF's open method for it
+ * later. However, OF will probably fall over if we do that after
+ * we've taken over the MMU.
+ * So we check whether we will need to open the display,
+ * and if so, open it now.
+ */
+static unsigned long __init
+check_display(unsigned long mem)
+{
+ phandle node;
+ ihandle ih;
+ int i;
+ unsigned long offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+ char type[64], *path;
+ /* Standard 16-colour text palette, 3 bytes (r,g,b) per entry. */
+ static unsigned char default_colors[] = {
+ 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xaa,
+ 0x00, 0xaa, 0x00,
+ 0x00, 0xaa, 0xaa,
+ 0xaa, 0x00, 0x00,
+ 0xaa, 0x00, 0xaa,
+ 0xaa, 0xaa, 0x00,
+ 0xaa, 0xaa, 0xaa,
+ 0x55, 0x55, 0x55,
+ 0x55, 0x55, 0xff,
+ 0x55, 0xff, 0x55,
+ 0x55, 0xff, 0xff,
+ 0xff, 0x55, 0x55,
+ 0xff, 0x55, 0xff,
+ 0xff, 0xff, 0x55,
+ 0xff, 0xff, 0xff
+ };
+
+ _prom->disp_node = 0;
+
+ /* Open every node of device_type "display" while OF can still
+ * run its open methods; remember the paths (stored at `mem') in
+ * prom_display_paths[].
+ */
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
+ type, sizeof(type));
+ if (strcmp(type, RELOC("display")) != 0)
+ continue;
+ /* It seems OF doesn't null-terminate the path :-( */
+ path = (char *) mem;
+ memset(path, 0, 256);
+ if ((long) call_prom(RELOC("package-to-path"), 3, 1,
+ node, path, 255) < 0)
+ continue;
+ prom_print(RELOC("opening display "));
+ prom_print(path);
+ ih = (ihandle)call_prom(RELOC("open"), 1, 1, path);
+ if (ih == (ihandle)0 || ih == (ihandle)-1) {
+ prom_print(RELOC("... failed\n"));
+ continue;
+ }
+ prom_print(RELOC("... ok\n"));
+
+ if (_prom->disp_node == 0)
+ _prom->disp_node = (ihandle)(unsigned long)node;
+
+ /* Setup a useable color table when the appropriate
+ * method is available. Should update this to set-colors */
+ for (i = 0; i < 32; i++)
+ if (prom_set_color(ih, i, RELOC(default_colors)[i*3],
+ RELOC(default_colors)[i*3+1],
+ RELOC(default_colors)[i*3+2]) != 0)
+ break;
+
+#ifdef CONFIG_FB
+ for (i = 0; i < LINUX_LOGO_COLORS; i++)
+ if (prom_set_color(ih, i + 32,
+ RELOC(linux_logo_red)[i],
+ RELOC(linux_logo_green)[i],
+ RELOC(linux_logo_blue)[i]) != 0)
+ break;
+#endif /* CONFIG_FB */
+
+ /*
+ * If this display is the device that OF is using for stdout,
+ * move it to the front of the list.
+ */
+ mem += strlen(path) + 1;
+ i = RELOC(prom_num_displays)++;
+ if (RELOC(of_stdout_device) != 0 && i > 0
+ && strcmp(PTRRELOC(RELOC(of_stdout_device)), path) == 0) {
+ for (; i > 0; --i)
+ RELOC(prom_display_paths[i]) = RELOC(prom_display_paths[i-1]);
+ }
+ RELOC(prom_display_paths[i]) = PTRUNRELOC(path);
+ if (RELOC(prom_num_displays) >= FB_MAX)
+ break;
+ }
+ return DOUBLEWORD_ALIGN(mem);
+}
+
+/*
+ * virt_irq_init: mark every slot of both the virtual->real and the
+ * real->virtual IRQ translation tables as unused.
+ */
+void
+virt_irq_init(void)
+{
+ int n;
+
+ for (n = 0; n < NR_IRQS; n++)
+ virt_irq_to_real_map[n] = UNDEFINED_IRQ;
+
+ for (n = 0; n < NR_HW_IRQS; n++)
+ real_irq_to_virt_map[n] = UNDEFINED_IRQ;
+}
+
+/* Create a mapping for a real_irq if it doesn't already exist.
+ * Return the virtual irq as a convenience.
+ */
+unsigned long
+virt_irq_create_mapping(unsigned long real_irq)
+{
+ unsigned long virq;
+ if (naca->interrupt_controller == IC_OPEN_PIC)
+ return real_irq; /* no mapping for openpic (for now) */
+ virq = real_irq_to_virt(real_irq);
+ if (virq == UNDEFINED_IRQ) {
+ /* Assign a virtual IRQ number */
+ /* Prefer an identity mapping when the real number fits in
+ * the table and its slot is still free.
+ */
+ if (real_irq < NR_IRQS && virt_irq_to_real(real_irq) == UNDEFINED_IRQ) {
+ /* A 1-1 mapping will work. */
+ virq = real_irq;
+ } else {
+ /* Otherwise scan forward from the last allocation
+ * point (last_virt_irq is a module-level cursor).
+ */
+ while (last_virt_irq < NR_IRQS &&
+ virt_irq_to_real(++last_virt_irq) != UNDEFINED_IRQ)
+ /* skip irq's in use */;
+ if (last_virt_irq >= NR_IRQS)
+ panic("Too many IRQs are required on this system.  NR_IRQS=%d\n", NR_IRQS);
+ virq = last_virt_irq;
+ }
+ /* Record the mapping in both directions. */
+ virt_irq_to_real_map[virq] = real_irq;
+ real_irq_to_virt_map[real_irq] = virq;
+ }
+ return virq;
+}
+
+
+/*
+ * prom_next_node: depth-first device tree iterator.  *nodep advances
+ * to the next node in prefix order: first child, else next peer, else
+ * the peer of the nearest ancestor that has one.  Starting from 0
+ * yields the root (peer of 0).  Returns 1 while a node is available,
+ * 0 once the walk is exhausted.
+ */
+static int __init
+prom_next_node(phandle *nodep)
+{
+ unsigned long offset = reloc_offset();
+ phandle cur = *nodep;
+
+ /* Try the first child of the current node. */
+ if (cur != 0) {
+ *nodep = call_prom(RELOC("child"), 1, 1, cur);
+ if (*nodep != 0)
+ return 1;
+ }
+
+ /* No child: try the next peer. */
+ *nodep = call_prom(RELOC("peer"), 1, 1, cur);
+ if (*nodep != 0)
+ return 1;
+
+ /* Climb ancestors until one of them has a peer. */
+ for (;;) {
+ cur = call_prom(RELOC("parent"), 1, 1, cur);
+ if (cur == 0)
+ return 0;
+ *nodep = call_prom(RELOC("peer"), 1, 1, cur);
+ if (*nodep != 0)
+ return 1;
+ }
+}
+
+/*
+ * Make a copy of the device tree from the PROM.
+ */
+/*
+ * copy_device_tree: clone the firmware device tree into kernel memory
+ * starting at mem_start (inspect_node is given an 8MB budget).  Every
+ * copied node is chained onto the global `allnodes' list; returns the
+ * first free address after the copy.
+ */
+static unsigned long __init
+copy_device_tree(unsigned long mem_start)
+{
+ unsigned long offset = reloc_offset();
+ unsigned long mem_end = mem_start + (8<<20);
+ struct device_node **allnextp;
+ phandle root;
+ unsigned long new_start;
+
+ /* peer(0) hands back the root node of the tree. */
+ root = call_prom(RELOC("peer"), 1, 1, (phandle)0);
+ if (root == (phandle)0) {
+ prom_print(RELOC("couldn't get device tree root\n"));
+ prom_exit();
+ }
+
+ allnextp = &RELOC(allnodes);
+ mem_start = DOUBLEWORD_ALIGN(mem_start);
+ new_start = inspect_node(root, 0, mem_start, mem_end, &allnextp);
+ *allnextp = 0; /* terminate the allnodes chain */
+
+ return new_start;
+}
+
+/*
+ * inspect_node: recursively copy one OF node (and its subtree) into
+ * the region [mem_start, mem_end).  Links the new device_node onto
+ * the allnodes chain via *allnextpp and into its parent's child/
+ * sibling lists.  All stored pointers are PTRUNRELOC'd so they are
+ * valid once the kernel runs at its linked address.  Returns the new
+ * mem_start.
+ */
+__init
+static unsigned long
+inspect_node(phandle node, struct device_node *dad,
+ unsigned long mem_start, unsigned long mem_end,
+ struct device_node ***allnextpp)
+{
+ int l;
+ phandle child;
+ struct device_node *np;
+ struct property *pp, **prev_propp;
+ char *prev_name, *namep;
+ unsigned char *valp;
+ unsigned long offset = reloc_offset();
+
+ np = (struct device_node *) mem_start;
+ mem_start += sizeof(struct device_node);
+ memset(np, 0, sizeof(*np));
+ np->node = node;
+ **allnextpp = PTRUNRELOC(np);
+ *allnextpp = &np->allnext;
+ if (dad != 0) {
+ np->parent = PTRUNRELOC(dad);
+ /* we temporarily use the `next' field as `last_child'. */
+ if (dad->next == 0)
+ dad->child = PTRUNRELOC(np);
+ else
+ dad->next->sibling = PTRUNRELOC(np);
+ dad->next = np;
+ }
+
+ /* get and store all properties */
+ prev_propp = &np->properties;
+ prev_name = RELOC("");
+ for (;;) {
+ /* Property header, then its name, then its value, each
+ * carved sequentially from mem_start.
+ */
+ pp = (struct property *) mem_start;
+ namep = (char *) (pp + 1);
+ pp->name = PTRUNRELOC(namep);
+ /* nextprop enumerates property names; <= 0 means done. */
+ if ((long) call_prom(RELOC("nextprop"), 3, 1, node, prev_name,
+ namep) <= 0)
+ break;
+ mem_start = DOUBLEWORD_ALIGN((unsigned long)namep + strlen(namep) + 1);
+ prev_name = namep;
+ valp = (unsigned char *) mem_start;
+ pp->value = PTRUNRELOC(valp);
+ pp->length = (int)(long)
+ call_prom(RELOC("getprop"), 4, 1, node, namep,
+ valp, mem_end - mem_start);
+ /* A negative length means getprop failed: drop this
+ * property (its header space is reused next iteration).
+ */
+ if (pp->length < 0)
+ continue;
+ mem_start = DOUBLEWORD_ALIGN(mem_start + pp->length);
+ *prev_propp = PTRUNRELOC(pp);
+ prev_propp = &pp->next;
+ }
+ *prev_propp = 0;
+
+ /* get the node's full name */
+ l = (long) call_prom(RELOC("package-to-path"), 3, 1, node,
+ (char *) mem_start, mem_end - mem_start);
+ if (l >= 0) {
+ np->full_name = PTRUNRELOC((char *) mem_start);
+ *(char *)(mem_start + l) = 0;
+ mem_start = DOUBLEWORD_ALIGN(mem_start + l + 1);
+ }
+
+ /* do all our children */
+ child = call_prom(RELOC("child"), 1, 1, node);
+ while (child != (phandle)0) {
+ mem_start = inspect_node(child, np, mem_start, mem_end,
+ allnextpp);
+ child = call_prom(RELOC("peer"), 1, 1, child);
+ }
+
+ return mem_start;
+}
+
+/*
+ * finish_device_tree is called once things are running normally
+ * (i.e. with text and data mapped to the address they were linked at).
+ * It traverses the device tree and fills in the name, type,
+ * {n_}addrs and {n_}intrs fields of each node.
+ */
+void __init
+finish_device_tree(void)
+{
+ unsigned long mem = klimit;
+
+ virt_irq_init();
+
+ /* Walk the whole tree from the root; finish_node() bump-allocates
+  * its per-node data starting at klimit and returns the first free
+  * address past what it used. */
+ mem = finish_node(allnodes, mem, NULL, 0, 0);
+ dev_tree_size = mem - (unsigned long) allnodes;
+
+ /* Round up to a page boundary and reserve the range the tree data
+  * now occupies so the LMB allocator will not hand it out later. */
+ mem = _ALIGN(mem, PAGE_SIZE);
+ lmb_reserve(__pa(klimit), mem-klimit);
+
+ klimit = mem;
+
+ /* Cache the RTAS device node for later use. */
+ rtas.dev = find_devices("rtas");
+}
+
+/*
+ * Fill in the name/type/addrs/intrs fields for one node and recurse
+ * over its children.  'ifunc' interprets the "reg"/"assigned-addresses"
+ * properties according to the parent bus type; naddrc/nsizec are the
+ * inherited #address-cells/#size-cells values.  Returns the updated
+ * bump-allocation pointer (mem_start).
+ */
+static unsigned long __init
+finish_node(struct device_node *np, unsigned long mem_start,
+ interpret_func *ifunc, int naddrc, int nsizec)
+{
+ struct device_node *child;
+ int *ip;
+
+ np->name = get_property(np, "name", 0);
+ np->type = get_property(np, "device_type", 0);
+
+ /* get the device addresses and interrupts */
+ if (ifunc != NULL) {
+ mem_start = ifunc(np, mem_start, naddrc, nsizec);
+ }
+ mem_start = finish_node_interrupts(np, mem_start);
+
+ /* Look for #address-cells and #size-cells properties; they
+  * override the inherited values for our children. */
+ ip = (int *) get_property(np, "#address-cells", 0);
+ if (ip != NULL)
+ naddrc = *ip;
+ ip = (int *) get_property(np, "#size-cells", 0);
+ if (ip != NULL)
+ nsizec = *ip;
+
+ /* the f50 sets the name to 'display' and 'compatible' to what we
+ * expect for the name -- Cort
+ */
+ /* NOTE(review): np->name may be NULL here if the node had no "name"
+  * property; the strcmp below assumes it is non-NULL -- confirm. */
+ ifunc = NULL;
+ if (!strcmp(np->name, "display"))
+ np->name = get_property(np, "compatible", 0);
+
+ /* Choose the address interpreter for our children based on this
+  * node's bus type (root, PCI/VCI, or ISA). */
+ if (!strcmp(np->name, "device-tree") || np->parent == NULL)
+ ifunc = interpret_root_props;
+ else if (np->type == 0)
+ ifunc = NULL;
+ else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
+ ifunc = interpret_pci_props;
+ else if (!strcmp(np->type, "isa"))
+ ifunc = interpret_isa_props;
+
+ for (child = np->child; child != NULL; child = child->sibling)
+ mem_start = finish_node(child, mem_start, ifunc,
+ naddrc, nsizec);
+
+ return mem_start;
+}
+
+/* This routine walks the interrupt tree for a given device node and gathers
+ * all necessary information according to the draft interrupt mapping
+ * for CHRP. The current version was only tested on Apple "Core99" machines
+ * and may not handle cascaded controllers correctly.
+ *
+ * Fills in np->intrs / np->n_intrs (allocated from mem_start) and
+ * returns the updated bump-allocation pointer.
+ */
+__init
+static unsigned long
+finish_node_interrupts(struct device_node *np, unsigned long mem_start)
+{
+ /* Finish this node */
+ unsigned int *isizep, *asizep, *interrupts, *map, *map_mask, *reg;
+ phandle *parent, map_parent;
+ struct device_node *node, *parent_node;
+ int l, isize, ipsize, asize, map_size, regpsize;
+
+ /* Currently, we don't look at all nodes with no "interrupts" property */
+
+ interrupts = (unsigned int *)get_property(np, "interrupts", &l);
+ if (interrupts == NULL)
+ return mem_start;
+ /* property lengths are in bytes; convert to 32-bit cell counts */
+ ipsize = l>>2;
+
+ reg = (unsigned int *)get_property(np, "reg", &l);
+ regpsize = l>>2;
+
+ /* We assume default interrupt cell size is 1 (bogus ?) */
+ isize = 1;
+ node = np;
+
+ do {
+ /* We adjust the cell size if the current parent contains an #interrupt-cells
+ * property */
+ isizep = (unsigned int *)get_property(node, "#interrupt-cells", &l);
+ if (isizep)
+ isize = *isizep;
+
+ /* We don't do interrupt cascade (ISA) for now, we stop on the first
+ * controller found
+ */
+ if (get_property(node, "interrupt-controller", &l)) {
+ int i,j;
+
+ /* Found the controller: decode each interrupt specifier.
+  * Cell 0 is the line, cell 1 (if present) the sense;
+  * any further cells are skipped. */
+ np->intrs = (struct interrupt_info *) mem_start;
+ np->n_intrs = ipsize / isize;
+ mem_start += np->n_intrs * sizeof(struct interrupt_info);
+ for (i = 0; i < np->n_intrs; ++i) {
+ np->intrs[i].line = openpic_to_irq(virt_irq_create_mapping(*interrupts++));
+ np->intrs[i].sense = 1;
+ if (isize > 1)
+ np->intrs[i].sense = *interrupts++;
+ for (j=2; j<isize; j++)
+ interrupts++;
+ }
+ return mem_start;
+ }
+ /* We lookup for an interrupt-map. This code can only handle one interrupt
+ * per device in the map. We also don't handle #address-cells in the parent
+ * I skip the pci node itself here, may not be necessary but I don't like it's
+ * reg property.
+ */
+ if (np != node)
+ map = (unsigned int *)get_property(node, "interrupt-map", &l);
+ else
+ map = NULL;
+ if (map && l) {
+ int i, found, temp_isize, temp_asize;
+ map_size = l>>2;
+ map_mask = (unsigned int *)get_property(node, "interrupt-map-mask", &l);
+ asizep = (unsigned int *)get_property(node, "#address-cells", &l);
+ if (asizep && l == sizeof(unsigned int))
+ asize = *asizep;
+ else
+ asize = 0;
+ found = 0;
+ /* Each map entry is: child-unit-addr (asize cells),
+  * child-intr (isize cells), parent phandle, then the
+  * parent's unit-addr + intr cells. */
+ while (map_size>0 && !found) {
+ found = 1;
+ for (i=0; i<asize; i++) {
+ unsigned int mask = map_mask ? map_mask[i] : 0xffffffff;
+ if (!reg || (i>=regpsize) || ((mask & *map) != (mask & reg[i])))
+ found = 0;
+ map++;
+ map_size--;
+ }
+ for (i=0; i<isize; i++) {
+ unsigned int mask = map_mask ? map_mask[i+asize] : 0xffffffff;
+ if ((mask & *map) != (mask & interrupts[i]))
+ found = 0;
+ map++;
+ map_size--;
+ }
+ map_parent = *((phandle *)map);
+ map+=1; map_size-=1;
+ parent_node = find_phandle(map_parent);
+ temp_isize = isize;
+ temp_asize = 0;
+ /* The parent's own cell sizes tell us how many cells
+  * of this entry belong to the parent specifier. */
+ if (parent_node) {
+ isizep = (unsigned int *)get_property(parent_node, "#interrupt-cells", &l);
+ if (isizep)
+ temp_isize = *isizep;
+ asizep = (unsigned int *)get_property(parent_node, "#address-cells", &l);
+ if (asizep && l == sizeof(unsigned int))
+ temp_asize = *asizep;
+ }
+ if (!found) {
+ map += temp_isize + temp_asize;
+ map_size -= temp_isize + temp_asize;
+ }
+ }
+ if (found) {
+ /* Mapped to a new parent. Use the reg and interrupts specified in
+ * the map as the new search parameters. Then search from the parent.
+ */
+ node = parent_node;
+ reg = map;
+ regpsize = temp_asize;
+ interrupts = map + temp_asize;
+ ipsize = temp_isize;
+ continue;
+ }
+ }
+ /* We look for an explicit interrupt-parent.
+ */
+ parent = (phandle *)get_property(node, "interrupt-parent", &l);
+ if (parent && (l == sizeof(phandle)) &&
+ (parent_node = find_phandle(*parent))) {
+ node = parent_node;
+ continue;
+ }
+ /* Default, get real parent */
+ node = node->parent;
+ } while (node);
+
+ /* No controller found anywhere up the tree: leave np->intrs unset. */
+ return mem_start;
+}
+
+/*
+ * Return the #address-cells value in effect for 'np', i.e. the value
+ * found on the nearest ancestor that has the property.  Defaults to 1
+ * when no ancestor defines it.
+ */
+int
+prom_n_addr_cells(struct device_node* np)
+{
+ int* ip;
+ do {
+ /* #address-cells applies to a node's children, so start
+  * the search at the parent. */
+ if (np->parent)
+ np = np->parent;
+ ip = (int *) get_property(np, "#address-cells", 0);
+ if (ip != NULL)
+ return *ip;
+ } while (np->parent);
+ /* No #address-cells property for the root node, default to 1 */
+ return 1;
+}
+
+/*
+ * Return the #size-cells value in effect for 'np', i.e. the value
+ * found on the nearest ancestor that has the property.  Defaults to 1
+ * when no ancestor defines it.
+ */
+int
+prom_n_size_cells(struct device_node* np)
+{
+ int* ip;
+ do {
+ /* #size-cells applies to a node's children, so start
+  * the search at the parent. */
+ if (np->parent)
+ np = np->parent;
+ ip = (int *) get_property(np, "#size-cells", 0);
+ if (ip != NULL)
+ return *ip;
+ } while (np->parent);
+ /* No #size-cells property for the root node, default to 1 */
+ return 1;
+}
+
+/*
+ * Decode the "assigned-addresses" property of a PCI device node into
+ * an array of address_range structures allocated at mem_start.
+ * Sets np->addrs/np->n_addrs; returns the updated allocation pointer.
+ * naddrc/nsizec are unused for PCI (the property format is fixed).
+ */
+static unsigned long __init
+interpret_pci_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
+{
+ struct address_range *adr;
+ struct pci_reg_property *pci_addrs;
+ int i, l;
+
+ pci_addrs = (struct pci_reg_property *)
+ get_property(np, "assigned-addresses", &l);
+ if (pci_addrs != 0 && l >= sizeof(struct pci_reg_property)) {
+ i = 0;
+ adr = (struct address_range *) mem_start;
+ /* one address_range per pci_reg_property entry */
+ while ((l -= sizeof(struct pci_reg_property)) >= 0) {
+ adr[i].space = pci_addrs[i].addr.a_hi;
+ adr[i].address = pci_addrs[i].addr.a_lo;
+ adr[i].size = pci_addrs[i].size_lo;
+ ++i;
+ }
+ np->addrs = adr;
+ np->n_addrs = i;
+ mem_start += i * sizeof(struct address_range);
+ }
+ return mem_start;
+}
+
+/*
+ * Decode the "reg" property of an ISA device node into address_range
+ * structures allocated at mem_start.  Memory-space addresses are
+ * offset by _ISA_MEM_BASE.  Sets np->addrs/np->n_addrs; returns the
+ * updated allocation pointer.  naddrc/nsizec are unused here.
+ */
+static unsigned long __init
+interpret_isa_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
+{
+ struct isa_reg_property *rp;
+ struct address_range *adr;
+ int i, l;
+
+ rp = (struct isa_reg_property *) get_property(np, "reg", &l);
+ if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
+ i = 0;
+ adr = (struct address_range *) mem_start;
+ /* NOTE(review): the loop decrements by sizeof(struct
+  * reg_property) while indexing isa_reg_property entries;
+  * if the two structs differ in size the iteration count is
+  * wrong -- confirm against the struct definitions. */
+ while ((l -= sizeof(struct reg_property)) >= 0) {
+ adr[i].space = rp[i].space;
+ adr[i].address = rp[i].address
+ + (adr[i].space? 0: _ISA_MEM_BASE);
+ adr[i].size = rp[i].size;
+ ++i;
+ }
+ np->addrs = adr;
+ np->n_addrs = i;
+ mem_start += i * sizeof(struct address_range);
+ }
+
+ return mem_start;
+}
+
+/*
+ * Decode the "reg" property of a node directly under the root, using
+ * the inherited naddrc (#address-cells) and nsizec (#size-cells) to
+ * size each entry.  Only the least-significant address and size cells
+ * are kept.  Sets np->addrs/np->n_addrs; returns the updated
+ * allocation pointer.
+ */
+static unsigned long __init
+interpret_root_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
+{
+ struct address_range *adr;
+ int i, l;
+ unsigned int *rp;
+ int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
+
+ rp = (unsigned int *) get_property(np, "reg", &l);
+ if (rp != 0 && l >= rpsize) {
+ i = 0;
+ adr = (struct address_range *) mem_start;
+ while ((l -= rpsize) >= 0) {
+ adr[i].space = 0;
+ /* keep only the last (least-significant) cell of
+  * the address and of the size */
+ adr[i].address = rp[naddrc - 1];
+ adr[i].size = rp[naddrc + nsizec - 1];
+ ++i;
+ rp += naddrc + nsizec;
+ }
+ np->addrs = adr;
+ np->n_addrs = i;
+ mem_start += i * sizeof(struct address_range);
+ }
+
+ return mem_start;
+}
+
+/*
+ * Work out the sense (active-low level / active-high edge)
+ * of each interrupt from the device tree.
+ *
+ * Fills senses[0..max-off-1], indexed by (irq line - off), from the
+ * intrs[] arrays gathered earlier by finish_node_interrupts().
+ */
+void __init
+prom_get_irq_senses(unsigned char *senses, int off, int max)
+{
+ struct device_node *np;
+ int i, j;
+
+ /* default to level-triggered */
+ memset(senses, 1, max - off);
+
+ for (np = allnodes; np != 0; np = np->allnext) {
+ for (j = 0; j < np->n_intrs; j++) {
+ i = np->intrs[j].line;
+ if (i >= off && i < max)
+ senses[i-off] = np->intrs[j].sense;
+ }
+ }
+}
+
+/*
+ * Construct and return a list of the device_nodes with a given name.
+ *
+ * The match is case-insensitive.  The result list is chained through
+ * the nodes' 'next' fields, so only one such list can be in use at a
+ * time.
+ */
+struct device_node *
+find_devices(const char *name)
+{
+ struct device_node *head, **prevp, *np;
+
+ prevp = &head;
+ for (np = allnodes; np != 0; np = np->allnext) {
+ if (np->name != 0 && strcasecmp(np->name, name) == 0) {
+ *prevp = np;
+ prevp = &np->next;
+ }
+ }
+ *prevp = 0;
+ return head;
+}
+
+/*
+ * Construct and return a list of the device_nodes with a given type.
+ *
+ * The match is case-insensitive.  The result list is chained through
+ * the nodes' 'next' fields, so only one such list can be in use at a
+ * time.
+ */
+struct device_node *
+find_type_devices(const char *type)
+{
+ struct device_node *head, **prevp, *np;
+
+ prevp = &head;
+ for (np = allnodes; np != 0; np = np->allnext) {
+ if (np->type != 0 && strcasecmp(np->type, type) == 0) {
+ *prevp = np;
+ prevp = &np->next;
+ }
+ }
+ *prevp = 0;
+ return head;
+}
+
+/*
+ * Returns all nodes linked together
+ *
+ * (every node in the tree, chained through the 'next' fields in
+ * allnext order).
+ */
+struct device_node * __openfirmware
+find_all_nodes(void)
+{
+ struct device_node *head, **prevp, *np;
+
+ prevp = &head;
+ for (np = allnodes; np != 0; np = np->allnext) {
+ *prevp = np;
+ prevp = &np->next;
+ }
+ *prevp = 0;
+ return head;
+}
+
+/* Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
+ *
+ * Returns 1 on a (case-insensitive, prefix) match, 0 otherwise.
+ * The property value is a sequence of NUL-terminated strings.
+ */
+int
+device_is_compatible(struct device_node *device, const char *compat)
+{
+ const char* cp;
+ int cplen, l;
+
+ cp = (char *) get_property(device, "compatible", &cplen);
+ if (cp == NULL)
+ return 0;
+ while (cplen > 0) {
+ /* note: prefix match -- "compat" need only match the
+  * start of an entry */
+ if (strncasecmp(cp, compat, strlen(compat)) == 0)
+ return 1;
+ /* advance past this string and its terminating NUL */
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Indicates whether the root node has a given value in its
+ * compatible property.
+ *
+ * Returns 0 when the root node cannot be found.
+ */
+int
+machine_is_compatible(const char *compat)
+{
+ struct device_node *root;
+
+ root = find_path_device("/");
+ if (root == 0)
+ return 0;
+ return device_is_compatible(root, compat);
+}
+
+/*
+ * Construct and return a list of the device_nodes with a given type
+ * and compatible property.
+ *
+ * Pass type == NULL to match any type.  The result list is chained
+ * through the nodes' 'next' fields, so only one such list can be in
+ * use at a time.
+ */
+struct device_node *
+find_compatible_devices(const char *type, const char *compat)
+{
+ struct device_node *head, **prevp, *np;
+
+ prevp = &head;
+ for (np = allnodes; np != 0; np = np->allnext) {
+ if (type != NULL
+ && !(np->type != 0 && strcasecmp(np->type, type) == 0))
+ continue;
+ if (device_is_compatible(np, compat)) {
+ *prevp = np;
+ prevp = &np->next;
+ }
+ }
+ *prevp = 0;
+ return head;
+}
+
+/*
+ * Find the device_node with a given full_name.
+ *
+ * The comparison is case-insensitive; returns NULL if no node
+ * matches.
+ */
+struct device_node *
+find_path_device(const char *path)
+{
+ struct device_node *np;
+
+ for (np = allnodes; np != 0; np = np->allnext)
+ if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
+ return np;
+ return NULL;
+}
+
+/*
+ * Find the device_node with a given phandle.
+ *
+ * Returns NULL when no node carries that handle.  __init: only used
+ * during boot-time tree construction.
+ */
+static struct device_node * __init
+find_phandle(phandle ph)
+{
+ struct device_node *np;
+
+ for (np = allnodes; np != 0; np = np->allnext)
+ if (np->node == ph)
+ return np;
+ return NULL;
+}
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ *
+ * If lenp is non-NULL, the property length in bytes is stored there.
+ * Returns 0 (NULL) when the property does not exist.
+ */
+unsigned char *
+get_property(struct device_node *np, const char *name, int *lenp)
+{
+ struct property *pp;
+
+ for (pp = np->properties; pp != 0; pp = pp->next)
+ if (strcmp(pp->name, name) == 0) {
+ if (lenp != 0)
+ *lenp = pp->length;
+ return pp->value;
+ }
+ return 0;
+}
+
+/*
+ * Add a property to a node
+ *
+ * The property is appended to the end of the node's property list;
+ * prop->next is overwritten.  No locking is done here.
+ */
+void __openfirmware
+prom_add_property(struct device_node* np, struct property* prop)
+{
+ struct property **next = &np->properties;
+
+ prop->next = NULL;
+ /* walk to the tail of the singly-linked list */
+ while (*next)
+ next = &(*next)->next;
+ *next = prop;
+}
+
+#if 0
+/*
+ * Debug helper (compiled out): dump every property of a node to the
+ * kernel log.  Values that look like printable strings are shown as
+ * text, everything else as hex (truncated at 64 bytes).
+ */
+void __openfirmware
+print_properties(struct device_node *np)
+{
+ struct property *pp;
+ char *cp;
+ int i, n;
+
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+ printk(KERN_INFO "%s", pp->name);
+ /* pad the name to a 16-column field */
+ for (i = strlen(pp->name); i < 16; ++i)
+ printk(" ");
+ cp = (char *) pp->value;
+ /* heuristic: printable chars followed by a final NUL
+  * means "string" */
+ for (i = pp->length; i > 0; --i, ++cp)
+ if ((i > 1 && (*cp < 0x20 || *cp > 0x7e))
+ || (i == 1 && *cp != 0))
+ break;
+ if (i == 0 && pp->length > 1) {
+ /* looks like a string */
+ printk(" %s\n", (char *) pp->value);
+ } else {
+ /* dump it in hex */
+ n = pp->length;
+ if (n > 64)
+ n = 64;
+ if (pp->length % 4 == 0) {
+ unsigned int *p = (unsigned int *) pp->value;
+
+ n /= 4;
+ for (i = 0; i < n; ++i) {
+ if (i != 0 && (i % 4) == 0)
+ printk("\n ");
+ printk(" %08x", *p++);
+ }
+ } else {
+ unsigned char *bp = pp->value;
+
+ for (i = 0; i < n; ++i) {
+ if (i != 0 && (i % 16) == 0)
+ printk("\n ");
+ printk(" %02x", *bp++);
+ }
+ }
+ printk("\n");
+ if (pp->length > 64)
+ printk(" ... (length = %d)\n",
+ pp->length);
+ }
+ }
+}
+#endif
+
+
+/*
+ * Fatal-error handler for the boot path: drop into xmon when it is
+ * configured, otherwise (and afterwards) exit to the PROM forever.
+ */
+void __init
+abort()
+{
+#ifdef CONFIG_XMON
+ xmon(NULL);
+#endif
+ /* never returns */
+ for (;;)
+ prom_exit();
+}
+
+
+/* Verify bi_recs are good */
+/*
+ * A valid chain starts with a BI_FIRST record whose data[0] points to
+ * the BI_LAST record, whose data[0] in turn points back to the first
+ * record.  Returns bi_recs on success, NULL on any inconsistency.
+ */
+static struct bi_record *
+prom_bi_rec_verify(struct bi_record *bi_recs)
+{
+ struct bi_record *first, *last;
+
+ if ( bi_recs == NULL || bi_recs->tag != BI_FIRST )
+ return NULL;
+
+ last = (struct bi_record *)bi_recs->data[0];
+ if ( last == NULL || last->tag != BI_LAST )
+ return NULL;
+
+ first = (struct bi_record *)last->data[0];
+ if ( first == NULL || first != bi_recs )
+ return NULL;
+
+ return bi_recs;
+}
+
+/*
+ * Walk the bootinfo record chain and reserve any memory ranges it
+ * describes (currently only the initrd image).  Runs before the MMU
+ * is on, hence the reloc_offset()/PTRRELOC gymnastics.  Returns 'mem'
+ * unchanged.
+ */
+static unsigned long
+prom_bi_rec_reserve(unsigned long mem)
+{
+ unsigned long offset = reloc_offset();
+ struct prom_t *_prom = PTRRELOC(&prom);
+ struct bi_record *rec;
+
+ if ( _prom->bi_recs != NULL) {
+
+ for ( rec=_prom->bi_recs;
+ rec->tag != BI_LAST;
+ rec=bi_rec_next(rec) ) {
+ switch (rec->tag) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ case BI_INITRD:
+ /* data[0] = base, data[1] = size */
+ lmb_reserve(rec->data[0], rec->data[1]);
+ break;
+#endif /* CONFIG_BLK_DEV_INITRD */
+ }
+ }
+ /* The next use of this field will be after relocation
+ * is enabled, so convert this physical address into a
+ * virtual address.
+ */
+ _prom->bi_recs = PTRUNRELOC(_prom->bi_recs);
+ }
+
+ return mem;
+}
+
--- /dev/null
+/*
+ * linux/arch/ppc/kernel/ptrace.c
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/m68k/kernel/ptrace.c"
+ * Copyright (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * Modified by Cort Dougan (cort@hq.fsmlabs.com)
+ * and Paul Mackerras (paulus@linuxcare.com.au).
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * Set of msr bits that gdb can change on behalf of a process.
+ */
+#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
+
+/*
+ * does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+ */
+
+/*
+ * Get contents of register REGNO in task TASK.
+ *
+ * The pt_regs structure is addressed as a flat array of unsigned
+ * longs; out-of-range register numbers read as 0.
+ */
+static inline unsigned long get_reg(struct task_struct *task, int regno)
+{
+ if (regno < sizeof(struct pt_regs) / sizeof(unsigned long))
+ return ((unsigned long *)task->thread.regs)[regno];
+ return (0);
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ *
+ * Registers at or beyond PT_SOFTE may not be written (-EIO).  Writes
+ * to the MSR are restricted to the MSR_DEBUGCHANGE bits; all other
+ * MSR bits keep their current values.
+ */
+static inline int put_reg(struct task_struct *task, int regno,
+ unsigned long data)
+{
+ if (regno < PT_SOFTE) {
+ if (regno == PT_MSR)
+ data = (data & MSR_DEBUGCHANGE)
+ | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
+ ((unsigned long *)task->thread.regs)[regno] = data;
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Enable single-step by setting MSR_SE in the child's saved MSR
+ * (no-op for kernel threads, which have no user regs). */
+static inline void
+set_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+ if (regs != NULL)
+ regs->msr |= MSR_SE;
+}
+
+/* Disable single-step by clearing MSR_SE in the child's saved MSR
+ * (no-op for kernel threads, which have no user regs). */
+static inline void
+clear_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+ if (regs != NULL)
+ regs->msr &= ~MSR_SE;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* make sure the single step bit is not set. */
+ clear_single_step(child);
+}
+
+/*
+ * Native 64-bit ptrace system call entry point.
+ *
+ * request: the PTRACE_* / PPC_PTRACE_* operation
+ * pid:     target process (tracing init, pid 1, is refused)
+ * addr:    operation-dependent address/offset (8-byte aligned for
+ *          the USER-area operations)
+ * data:    operation-dependent value or user-space pointer
+ *
+ * Returns 0 or a positive value on success, negative errno on error.
+ */
+int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret = -EPERM;
+
+ lock_kernel();
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+
+ /* all remaining requests require an attached, stopped child
+  * (except KILL, which tolerates a running one) */
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out_tsk;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ break;
+ ret = put_user(tmp,(unsigned long *) data);
+ break;
+ }
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long index, tmp;
+
+ ret = -EIO;
+ /* convert to index and check */
+ index = (unsigned long) addr >> 3;
+ if ((addr & 7) || index > PT_FPSCR)
+ break;
+
+ if (index < PT_FPR0) {
+ tmp = get_reg(child, (int) index);
+ } else {
+ /* flush live FP state to thread.fpr first */
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
+ }
+ ret = put_user(tmp,(unsigned long *) data);
+ break;
+ }
+
+ /* If I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ break;
+ ret = -EIO;
+ break;
+
+ /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR: {
+ unsigned long index;
+
+ ret = -EIO;
+ /* convert to index and check */
+ index = (unsigned long) addr >> 3;
+ if ((addr & 7) || index > PT_FPSCR)
+ break;
+
+ /* the saved syscall argument may not be modified */
+ if (index == PT_ORIG_R3)
+ break;
+ if (index < PT_FPR0) {
+ ret = put_reg(child, index, data);
+ } else {
+ /* flush live FP state to thread.fpr first */
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
+ ret = 0;
+ }
+ break;
+ }
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ /* data is the signal to deliver on resume (0 = none) */
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+ clear_single_step(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+/*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ ret = 0;
+ if (child->state == TASK_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+ clear_single_step(child);
+ wake_up_process(child);
+ break;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ set_single_step(child);
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_DETACH:
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PPC_PTRACE_GETREGS:
+ { /* Get GPRs 0 - 31. */
+ u64 tmp;
+ u64 cntr;
+ ret = 0;
+ /* copy the first 32 slots of pt_regs (the GPRs) out to
+  * the u64 array at user address 'data' */
+ for (cntr=0; cntr<32 && ret==0; ++cntr)
+ {
+ tmp = ((u64*)child->thread.regs)[cntr];
+ ret = put_user(tmp, (u64*)(data+cntr));
+ }
+ break;
+ }
+
+ case PPC_PTRACE_SETREGS:
+ { /* Set GPRs 0 - 31. */
+ u64 cntr;
+ ret = 0;
+ /* NOTE(review): reads *(u64*)(data+cntr) directly rather
+  * than via get_user -- confirm this is safe here. */
+ for (cntr=0; cntr<32 && ret==0; ++cntr)
+ {
+ ret = put_reg(child, cntr, *(u64*)(data+cntr));
+ }
+ break;
+ }
+
+ case PPC_PTRACE_GETFPREGS:
+ { /* Get FPRs 0 - 31. */
+ u64 tmp;
+ u64 cntr;
+ /* note: this -EIO is immediately overwritten below */
+ ret = -EIO;
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ ret = 0;
+ for (cntr=0; cntr<32 && ret==0; ++cntr)
+ {
+ tmp = ((u64*)child->thread.fpr)[cntr];
+ ret = put_user(tmp, (u64*)(data+cntr));
+ }
+ break;
+ }
+
+ case PPC_PTRACE_SETFPREGS:
+ { /* Set FPRs 0 - 31. */
+ u64 cntr;
+ ret = -EIO;
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ /* NOTE(review): reads *(u64*)(data+cntr) directly rather
+  * than via get_user -- confirm this is safe here. */
+ for (cntr=0; cntr<32; ++cntr)
+ {
+ ((u64*)child->thread.fpr)[cntr] = *(u64*)(data+cntr);
+ }
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = -EIO;
+ break;
+ }
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ return ret;
+}
+
+/*
+ * Called on syscall entry/exit when TIF_SYSCALL_TRACE is set: stop
+ * the current task with SIGTRAP status and notify the tracer, then
+ * (after being resumed) deliver any signal the tracer stored in
+ * exit_code.
+ */
+void do_syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ current->exit_code = SIGTRAP;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
+
--- /dev/null
+/*
+ * linux/arch/ppc/kernel/ptrace32.c
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/m68k/kernel/ptrace.c"
+ * Copyright (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * Modified by Cort Dougan (cort@hq.fsmlabs.com)
+ * and Paul Mackerras (paulus@linuxcare.com.au).
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * Set of msr bits that gdb can change on behalf of a process.
+ */
+#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
+
+/*
+ * does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+ */
+
+/*
+ * Get contents of register REGNO in task TASK.
+ *
+ * The pt_regs structure is addressed as a flat array of unsigned
+ * longs; out-of-range register numbers read as 0.
+ */
+static inline unsigned long get_reg(struct task_struct *task, int regno)
+{
+ if (regno < sizeof(struct pt_regs) / sizeof(unsigned long))
+ return ((unsigned long *)task->thread.regs)[regno];
+ return (0);
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ * (Put DATA into task TASK's register REGNO.)
+ *
+ * Registers at or beyond PT_SOFTE may not be written (-EIO).  Writes
+ * to the MSR are restricted to the MSR_DEBUGCHANGE bits; all other
+ * MSR bits keep their current values.
+ */
+static inline int put_reg(struct task_struct *task, int regno, unsigned long data)
+{
+ if (regno < PT_SOFTE)
+ {
+ if (regno == PT_MSR)
+ data = (data & MSR_DEBUGCHANGE) | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
+ ((unsigned long *)task->thread.regs)[regno] = data;
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Enable single-step by setting MSR_SE in the child's saved MSR
+ * (no-op for kernel threads, which have no user regs). */
+static inline void
+set_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+ if (regs != NULL)
+ regs->msr |= MSR_SE;
+}
+
+/* Disable single-step by clearing MSR_SE in the child's saved MSR
+ * (no-op for kernel threads, which have no user regs). */
+static inline void
+clear_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+ if (regs != NULL)
+ regs->msr &= ~MSR_SE;
+}
+
+/*
+ * ptrace entry point for 32-bit tracer processes on a 64-bit kernel.
+ *
+ * Standard PTRACE_* requests operate on 32-bit words (the tracer's
+ * view); the PPC_PTRACE_*_3264 variants let a 32-bit tracer address
+ * the full 64-bit address space and register file of a 64-bit
+ * tracee.  Returns 0 on success, negative errno on error.
+ */
+int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
+{
+ struct task_struct *child;
+ int ret = -EPERM;
+
+ lock_kernel();
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+
+ /* all remaining requests require an attached, stopped child
+  * (except KILL, which tolerates a running one) */
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out_tsk;
+
+ switch (request)
+ {
+ /* Read word at location ADDR */
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ {
+ unsigned int tmp_mem_value;
+ int copied;
+
+ copied = access_process_vm(child, addr, &tmp_mem_value, sizeof(tmp_mem_value), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp_mem_value))
+ break;
+ ret = put_user(tmp_mem_value, (u32*)data); // copy 4 bytes of data into the user location specified by the 8 byte pointer in "data".
+ break;
+ }
+
+ /* Read 4 bytes of the other process' storage */
+ /* data is a pointer specifying where the user wants the 4 bytes copied into */
+ /* addr is a pointer in the user's storage that contains an 8 byte address in the other process of the 4 bytes that is to be read */
+ /* (this is run in a 32-bit process looking at a 64-bit process) */
+ /* when I and D space are separate, these will need to be fixed. */
+ case PPC_PTRACE_PEEKTEXT_3264:
+ case PPC_PTRACE_PEEKDATA_3264:
+ {
+ u32 tmp_mem_value;
+ int copied;
+ u32* addrOthers;
+
+ ret = -EIO;
+
+ /* Get the addr in the other process that we want to read */
+ if (get_user(addrOthers,(u32**)addr) != 0)
+ break;
+
+ copied = access_process_vm(child, (u64)addrOthers, &tmp_mem_value, sizeof(tmp_mem_value), 0);
+ if (copied != sizeof(tmp_mem_value))
+ break;
+ ret = put_user(tmp_mem_value, (u32*)data); // copy 4 bytes of data into the user location specified by the 8 byte pointer in "data".
+ break;
+ }
+
+ /* Read a register (specified by ADDR) out of the "user area" */
+ case PTRACE_PEEKUSR: {
+ int index;
+ unsigned int reg32bits;
+ unsigned long tmp_reg_value;
+
+ ret = -EIO;
+ /* convert to index and check */
+ index = (unsigned long) addr >> 2;
+ if ((addr & 3) || index > PT_FPSCR32)
+ break;
+
+ if (index < PT_FPR0) {
+ tmp_reg_value = get_reg(child, index);
+ } else {
+ /* flush live FP state to thread.fpr first */
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ /* the user space code considers the floating point to be
+ * an array of unsigned int (32 bits) - the index passed
+ * in is based on this assumption.
+ */
+ tmp_reg_value = ((unsigned int *)child->thread.fpr)[index - PT_FPR0];
+ }
+ reg32bits = tmp_reg_value;
+ ret = put_user(reg32bits, (u32*)data); // copy 4 bytes of data into the user location specified by the 8 byte pointer in "data".
+ break;
+ }
+
+ /* Read 4 bytes out of the other process' pt_regs area */
+ /* data is a pointer specifying where the user wants the 4 bytes copied into */
+ /* addr is the offset into the other process' pt_regs structure that is to be read */
+ /* (this is run in a 32-bit process looking at a 64-bit process) */
+ case PPC_PTRACE_PEEKUSR_3264:
+ {
+ u32 index;
+ u32 reg32bits;
+ u64 tmp_reg_value;
+ u32 numReg;
+ u32 part;
+
+ ret = -EIO;
+ /* Determine which register the user wants */
+ index = (u64)addr >> 2; /* Divide addr by 4 */
+ numReg = index / 2;
+ /* Determine which part of the register the user wants */
+ if (index % 2)
+ part = 1; /* want the 2nd half of the register (right-most). */
+ else
+ part = 0; /* want the 1st half of the register (left-most). */
+
+ /* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */
+ if ((addr & 3) || numReg > PT_FPSCR)
+ break;
+
+ if (numReg >= PT_FPR0)
+ {
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ }
+ /* NOTE(review): for numReg >= PT_FPR0 this still reads via
+  * get_reg(), i.e. the pt_regs array, not thread.fpr --
+  * confirm the intended FPR access path. */
+ tmp_reg_value = get_reg(child, numReg);
+ reg32bits = ((u32*)&tmp_reg_value)[part];
+ ret = put_user(reg32bits, (u32*)data); /* copy 4 bytes of data into the user location specified by the 8 byte pointer in "data". */
+ break;
+ }
+
+ /* Write the word at location ADDR */
+ /* If I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA: {
+ unsigned int tmp_value_to_write;
+ tmp_value_to_write = data;
+ ret = 0;
+ if (access_process_vm(child, addr, &tmp_value_to_write, sizeof(tmp_value_to_write), 1) == sizeof(tmp_value_to_write))
+ break;
+ ret = -EIO;
+ break;
+ }
+
+ /* Write 4 bytes into the other process' storage */
+ /* data is the 4 bytes that the user wants written */
+ /* addr is a pointer in the user's storage that contains an 8 byte address in the other process where the 4 bytes that is to be written */
+ /* (this is run in a 32-bit process looking at a 64-bit process) */
+ /* when I and D space are separate, these will need to be fixed. */
+ case PPC_PTRACE_POKETEXT_3264:
+ case PPC_PTRACE_POKEDATA_3264:
+ {
+ u32 tmp_value_to_write = data;
+ u32* addrOthers;
+ int bytesWritten;
+
+ /* Get the addr in the other process that we want to write into */
+ ret = -EIO;
+ if (get_user(addrOthers,(u32**)addr) != 0)
+ break;
+
+ ret = 0;
+ bytesWritten = access_process_vm(child, (u64)addrOthers, &tmp_value_to_write, sizeof(tmp_value_to_write), 1);
+ if (bytesWritten == sizeof(tmp_value_to_write))
+ break;
+ ret = -EIO;
+ break;
+ }
+
+ /* Write DATA into location ADDR within the USER area */
+ case PTRACE_POKEUSR: {
+ unsigned long index;
+
+ ret = -EIO;
+
+ /* convert to index and check */
+ index = (unsigned long) addr >> 2;
+ if ((addr & 3) || index > PT_FPSCR32)
+ break;
+
+ /* the saved syscall argument may not be modified */
+ if (index == PT_ORIG_R3)
+ break;
+
+
+ if (index < PT_FPR0) {
+ ret = put_reg(child, index, data);
+ } else {
+ /* flush live FP state to thread.fpr first */
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ /* the user space code considers the floating point to be
+ * an array of unsigned int (32 bits) - the index passed
+ * in is based on this assumption.
+ */
+
+ ((unsigned int *)child->thread.fpr)[index - PT_FPR0] = data;
+ ret = 0;
+ }
+ break;
+ }
+
+ /* Write 4 bytes into the other process' pt_regs area */
+ /* data is the 4 bytes that the user wants written */
+ /* addr is the offset into the other process' pt_regs structure that is to be written into */
+ /* (this is run in a 32-bit process looking at a 64-bit process) */
+ case PPC_PTRACE_POKEUSR_3264:
+ {
+ u32 index;
+ u32 numReg;
+
+ ret = -EIO;
+
+ /* Determine which register the user wants */
+ index = (u64)addr >> 2; /* Divide addr by 4 */
+ numReg = index / 2;
+
+ /* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */
+ if ((addr & 3) || numReg > PT_FPSCR)
+ break;
+ /* Insure it is a register we let them change */
+ if ((numReg == PT_ORIG_R3) || ((numReg > PT_CCR) && (numReg < PT_FPR0)))
+ break;
+
+ if (numReg >= PT_FPR0)
+ {
+ if (child->thread.regs->msr & MSR_FP)
+ giveup_fpu(child);
+ }
+
+ if (numReg == PT_MSR)
+ data = (data & MSR_DEBUGCHANGE) | (child->thread.regs->msr & ~MSR_DEBUGCHANGE);
+
+ /* NOTE(review): this writes via the pt_regs array even for
+  * numReg >= PT_FPR0; confirm FPR writes should not go to
+  * thread.fpr instead. */
+ ((u32*)child->thread.regs)[index] = data;
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ /* data is the signal to deliver on resume (0 = none) */
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+ clear_single_step(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ /*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ ret = 0;
+ if (child->state == TASK_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+ clear_single_step(child);
+ wake_up_process(child);
+ break;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ set_single_step(child);
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_DETACH:
+ ret = ptrace_detach(child, data);
+ break;
+
+ default:
+ ret = -EIO;
+ break;
+ }
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ return ret;
+}
--- /dev/null
+
+/*
+ * ras.c
+ * Copyright (C) 2001 Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Change Activity:
+ * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
+ * End Change Activity
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/threads.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/random.h>
+#include <linux/sysrq.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/cache.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/ppcdebug.h>
+
+static void ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static void ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+void init_ras_IRQ(void);
+
+/* #define DEBUG */
+
+/*
+ * Initialize handlers for the set of interrupts caused by hardware errors
+ * and power system events.
+ */
+void init_ras_IRQ(void) {
+ struct device_node *np;
+ unsigned int *ireg, len, i;
+
+ /* Internal (hardware) error sources: register one handler per cell
+ * of the node's "open-pic-interrupt" property.
+ * NOTE(review): the return value of request_irq() is ignored, so a
+ * failed registration goes unreported -- confirm this is acceptable. */
+ if((np = find_path_device("/event-sources/internal-errors")) &&
+ (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
+ &len))) {
+ for(i=0; i<(len / sizeof(*ireg)); i++) {
+ /* Offset past the 8259 range to form the virtual irq number. */
+ request_irq(virt_irq_create_mapping(*(ireg)) + NUM_8259_INTERRUPTS,
+ &ras_error_interrupt, 0,
+ "RAS_ERROR", NULL);
+ ireg++;
+ }
+ }
+
+ /* Environmental/power warning (EPOW) sources, registered the same way. */
+ if((np = find_path_device("/event-sources/epow-events")) &&
+ (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
+ &len))) {
+ for(i=0; i<(len / sizeof(*ireg)); i++) {
+ request_irq(virt_irq_create_mapping(*(ireg)) + NUM_8259_INTERRUPTS,
+ &ras_epow_interrupt, 0,
+ "RAS_EPOW", NULL);
+ ireg++;
+ }
+ }
+}
+
+/*
+ * Handle power subsystem events (EPOW).
+ *
+ * Presently we just log that the event has occurred. This should be
+ * fixed to examine the type of power failure and take appropriate action
+ * where the time horizon permits something useful to be done.
+ */
+static void
+ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct rtas_error_log log_entry;
+ unsigned int size = sizeof(log_entry);
+ long status = 0xdeadbeef; /* sentinel; overwritten by rtas_call below */
+
+ /* Ask RTAS check-exception for details of the event behind this irq.
+ * 0x500 is passed as the vector argument (presumably the external
+ * interrupt vector -- verify against the RTAS binding); the mask
+ * selects EPOW and power-management events. */
+ status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
+ 0x500, irq,
+ EPOW_WARNING | POWERMGM_EVENTS,
+ 1, /* Time Critical */
+ __pa(&log_entry), size);
+
+ /* Log only: dump the first 8 bytes of the error log plus the RTAS
+ * status; no corrective action is taken yet. */
+ udbg_printf("EPOW <0x%lx 0x%lx>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_WARNING
+ "EPOW <0x%lx 0x%lx>\n",*((unsigned long *)&log_entry), status);
+}
+
+/*
+ * Handle hardware error interrupts.
+ *
+ * RTAS check-exception is called to collect data on the exception. If
+ * the error is deemed recoverable, we log a warning and return.
+ * For nonrecoverable errors, an error is logged and we stop all processing
+ * as quickly as possible in order to prevent propagation of the failure.
+ */
+static void
+ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct rtas_error_log log_entry;
+ unsigned int size = sizeof(log_entry);
+ long status = 0xdeadbeef; /* sentinel; overwritten by rtas_call below */
+
+ /* Collect the error details from firmware for this interrupt. */
+ status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
+ 0x500, irq,
+ INTERNAL_ERROR,
+ 1, /* Time Critical */
+ __pa(&log_entry), size);
+
+ /* Fatal path: check-exception succeeded (status != 1) and the log
+ * severity is at or above "error-sync" -- log and power off to stop
+ * the failure from propagating. */
+ if((status != 1) &&
+ (log_entry.severity >= SEVERITY_ERROR_SYNC)) {
+ udbg_printf("HW Error <0x%lx 0x%lx>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_EMERG
+ "Error: Fatal hardware error <0x%lx 0x%lx>\n",
+ *((unsigned long *)&log_entry), status);
+
+#ifndef DEBUG
+ /* Don't actually power off when debugging so we can test
+ * without actually failing while injecting errors.
+ */
+ ppc_md.power_off();
+#endif
+ } else {
+ /* Recoverable (or the call itself failed): warn and continue. */
+ udbg_printf("Recoverable HW Error <0x%lx 0x%lx>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_WARNING
+ "Warning: Recoverable hardware error <0x%lx 0x%lx>\n",
+ *((unsigned long *)&log_entry), status);
+
+ return;
+ }
+}
--- /dev/null
+/*
+ * arch/ppc64/kernel/rtas-proc.c
+ * Copyright (C) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * RTAS (Runtime Abstraction Services) stuff
+ * Intention is to provide a clean user interface
+ * to use the RTAS.
+ *
+ * TODO:
+ * Split off a header file and maybe move it to a different
+ * location. Write Documentation on what the /proc/rtas/ entries
+ * actually do.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <linux/time.h>
+#include <linux/string.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/machdep.h> /* for ppc_md */
+#include <asm/time.h>
+
+/* Token for Sensors */
+#define KEY_SWITCH 0x0001
+#define ENCLOSURE_SWITCH 0x0002
+#define THERMAL_SENSOR 0x0003
+#define LID_STATUS 0x0004
+#define POWER_SOURCE 0x0005
+#define BATTERY_VOLTAGE 0x0006
+#define BATTERY_REMAINING 0x0007
+#define BATTERY_PERCENTAGE 0x0008
+#define EPOW_SENSOR 0x0009
+#define BATTERY_CYCLESTATE 0x000a
+#define BATTERY_CHARGING 0x000b
+
+/* IBM specific sensors */
+#define IBM_SURVEILLANCE 0x2328 /* 9000 */
+#define IBM_FANRPM 0x2329 /* 9001 */
+#define IBM_VOLTAGE 0x232a /* 9002 */
+#define IBM_DRCONNECTOR 0x232b /* 9003 */
+#define IBM_POWERSUPPLY 0x232c /* 9004 */
+#define IBM_INTQUEUE 0x232d /* 9005 */
+
+/* Status return values */
+#define SENSOR_CRITICAL_HIGH 13
+#define SENSOR_WARNING_HIGH 12
+#define SENSOR_NORMAL 11
+#define SENSOR_WARNING_LOW 10
+#define SENSOR_CRITICAL_LOW 9
+#define SENSOR_SUCCESS 0
+#define SENSOR_HW_ERROR -1
+#define SENSOR_BUSY -2
+#define SENSOR_NOT_EXIST -3
+#define SENSOR_DR_ENTITY -9000
+
+/* Location Codes */
+#define LOC_SCSI_DEV_ADDR 'A'
+#define LOC_SCSI_DEV_LOC 'B'
+#define LOC_CPU 'C'
+#define LOC_DISKETTE 'D'
+#define LOC_ETHERNET 'E'
+#define LOC_FAN 'F'
+#define LOC_GRAPHICS 'G'
+/* reserved / not used 'H' */
+#define LOC_IO_ADAPTER 'I'
+/* reserved / not used 'J' */
+#define LOC_KEYBOARD 'K'
+#define LOC_LCD 'L'
+#define LOC_MEMORY 'M'
+#define LOC_NV_MEMORY 'N'
+#define LOC_MOUSE 'O'
+#define LOC_PLANAR 'P'
+#define LOC_OTHER_IO 'Q'
+#define LOC_PARALLEL 'R'
+#define LOC_SERIAL 'S'
+#define LOC_DEAD_RING 'T'
+#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
+#define LOC_VOLTAGE 'V'
+#define LOC_SWITCH_ADAPTER 'W'
+#define LOC_OTHER 'X'
+#define LOC_FIRMWARE 'Y'
+#define LOC_SCSI 'Z'
+
+/* Tokens for indicators */
+#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
+#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
+#define SYSTEM_POWER_STATE 0x0003
+#define WARNING_LIGHT 0x0004
+#define DISK_ACTIVITY_LIGHT 0x0005
+#define HEX_DISPLAY_UNIT 0x0006
+#define BATTERY_WARNING_TIME 0x0007
+#define CONDITION_CYCLE_REQUEST 0x0008
+#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
+#define DR_ACTION 0x2329 /* 9001 */
+#define DR_INDICATOR 0x232a /* 9002 */
+/* 9003 - 9004: Vendor specific */
+#define GLOBAL_INTERRUPT_QUEUE 0x232d /* 9005 */
+/* 9006 - 9999: Vendor specific */
+
+/* other */
+#define MAX_SENSORS 17 /* I only know of 17 sensors */
+#define MAX_LINELENGTH 256
+#define SENSOR_PREFIX "ibm,sensor-"
+#define cel_to_fahr(x) ((x*9/5)+32)
+
+
+/* Globals */
+static struct proc_dir_entry *proc_rtas;
+static struct rtas_sensors sensors;
+static struct device_node *rtas_node;
+static unsigned long power_on_time = 0; /* Save the time the user set */
+static char progress_led[MAX_LINELENGTH];
+
+static unsigned long rtas_tone_frequency = 1000;
+static unsigned long rtas_tone_volume = 0;
+
+/* ****************STRUCTS******************************************* */
+struct individual_sensor {
+ unsigned int token; /* RTAS sensor token (see defines above) */
+ unsigned int quant; /* per-sensor instance count; used as an
+ inclusive upper index when iterating */
+};
+
+struct rtas_sensors {
+ struct individual_sensor sensor[MAX_SENSORS];
+ unsigned int quant; /* number of valid entries in sensor[] */
+};
+
+/* ****************************************************************** */
+/* Declarations */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data);
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+
+/* File operation tables for the /proc/rtas entries; each pairs the
+ * read/write handlers defined below and is attached to its proc file
+ * in proc_rtas_init(). */
+struct file_operations ppc_rtas_poweron_operations = {
+ read: ppc_rtas_poweron_read,
+ write: ppc_rtas_poweron_write
+};
+struct file_operations ppc_rtas_progress_operations = {
+ read: ppc_rtas_progress_read,
+ write: ppc_rtas_progress_write
+};
+
+struct file_operations ppc_rtas_clock_operations = {
+ read: ppc_rtas_clock_read,
+ write: ppc_rtas_clock_write
+};
+
+struct file_operations ppc_rtas_tone_freq_operations = {
+ read: ppc_rtas_tone_freq_read,
+ write: ppc_rtas_tone_freq_write
+};
+struct file_operations ppc_rtas_tone_volume_operations = {
+ read: ppc_rtas_tone_volume_read,
+ write: ppc_rtas_tone_volume_write
+};
+
+int ppc_rtas_find_all_sensors (void);
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf);
+char * ppc_rtas_process_error(int error);
+int get_location_code(struct individual_sensor s, char * buf);
+int check_location_string (char *c, char * buf);
+int check_location (char *c, int idx, char * buf);
+
+/* ****************************************************************** */
+/* MAIN */
+/* ****************************************************************** */
+void proc_rtas_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ /* Nothing to do without an RTAS device node; iSeries has no RTAS. */
+ rtas_node = find_devices("rtas");
+ if ((rtas_node == 0) || (_machine == _MACH_iSeries)) {
+ return;
+ }
+
+ proc_rtas = proc_mkdir("rtas", 0);
+ if (proc_rtas == 0)
+ return;
+
+ /* /proc/rtas entries */
+
+ entry = create_proc_entry("progress", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_progress_operations;
+
+ entry = create_proc_entry("clock", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_clock_operations;
+
+ entry = create_proc_entry("poweron", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_poweron_operations;
+
+ /* "sensors" uses the simpler read_proc interface instead of fops. */
+ create_proc_read_entry("sensors", S_IRUGO, proc_rtas,
+ ppc_rtas_sensor_read, NULL);
+
+ entry = create_proc_entry("frequency", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_freq_operations;
+
+ entry = create_proc_entry("volume", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_volume_operations;
+}
+
+/* ****************************************************************** */
+/* POWER-ON-TIME */
+/* ****************************************************************** */
+/* Parse a decimal epoch time from the written string and program the
+ * machine's power-on timer via RTAS set-time-for-power-on.
+ * NOTE(review): buf is used directly (simple_strtoul) with no
+ * copy_from_user() -- confirm the proc write path guarantees a
+ * kernel-accessible, NUL-terminated buffer here. */
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_poweron_write: Invalid time\n");
+ return count;
+ }
+ power_on_time = nowtime; /* save the time */
+
+ to_tm(nowtime, &tm);
+
+ error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting poweron time returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+/* Report the last power-on time the user set (0 means "not set").
+ * NOTE(review): sprintf writes straight into the caller's buf with no
+ * copy_to_user() and no check against count before formatting --
+ * confirm this matches the proc read contract of this kernel. */
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ if (power_on_time == 0)
+ n = sprintf(buf, "Power on time not set\n");
+ else
+ n = sprintf(buf, "%lu\n", power_on_time);
+
+ /* Clamp the returned byte count to the remaining data and to count,
+ * advancing *ppos so repeated reads eventually return 0 (EOF). */
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* PROGRESS */
+/* ****************************************************************** */
+/* Forward the written string to the platform progress display (LED /
+ * op panel) via ppc_md.progress, remembering it for later reads.
+ * NOTE(review): strcpy into progress_led (MAX_LINELENGTH bytes) is not
+ * bounded by count -- an oversized write would overflow; and despite
+ * the "hexdigits" comment the value is parsed base 10 -- verify intent. */
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long hex;
+
+ strcpy(progress_led, buf); /* save the string */
+ /* Lets see if the user passed hexdigits */
+ hex = simple_strtoul(buf, NULL, 10);
+
+ ppc_md.progress ((char *)buf, hex);
+ return count;
+
+ /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
+}
+/* ****************************************************************** */
+/* Echo back the last string written to the progress display. */
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n = 0;
+ /* NOTE(review): progress_led is an array, so this test is always
+ * true; an emptiness check would be progress_led[0] != '\0'. */
+ if (progress_led != NULL)
+ n = sprintf (buf, "%s\n", progress_led);
+ /* Standard windowing: clamp to remaining data and count, advance
+ * *ppos so a second read hits EOF. */
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* CLOCK */
+/* ****************************************************************** */
+/* Parse a decimal epoch time from the written string and set the
+ * hardware clock via RTAS set-time-of-day. Errors are logged but the
+ * write still reports success (returns count) -- existing behavior. */
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_clock_write: Invalid time\n");
+ return count;
+ }
+
+ /* Split the epoch seconds into calendar fields for RTAS. */
+ to_tm(nowtime, &tm);
+ error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+/* Read the hardware clock via RTAS get-time-of-day and report it as
+ * epoch seconds (or "0" on error). */
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long *ret;
+ int n, error;
+
+ /* get-time-of-day is called with nret=8 and ret[5] is read below,
+ * so room for 8 longs is needed; the previous kmalloc(4*8) only
+ * held four unsigned longs and overflowed. Also handle allocation
+ * failure instead of dereferencing NULL. */
+ ret = kmalloc(8 * sizeof(unsigned long), GFP_KERNEL);
+ if (ret == NULL)
+ return -ENOMEM;
+
+ error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
+
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+
+ if (error != 0){
+ printk(KERN_WARNING "error: reading the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ n = sprintf (buf, "0");
+ } else {
+ n = sprintf (buf, "%lu\n", mktime(year, mon, day, hour, min, sec));
+ }
+ kfree(ret);
+
+ /* Clamp to the remaining data and to count; advance *ppos. */
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* SENSOR STUFF */
+/* ****************************************************************** */
+/* read_proc handler for /proc/rtas/sensors: build the whole sensor
+ * report in a temporary buffer, then copy out the [off, off+count)
+ * window the proc layer asked for. */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i,j,n;
+ unsigned long ret;
+ int state, error;
+ char *buffer;
+ int get_sensor_state = rtas_token("get-sensor-state");
+
+ if (count < 0)
+ return -EINVAL;
+
+ /* May not be enough */
+ buffer = kmalloc(MAX_LINELENGTH*MAX_SENSORS, GFP_KERNEL);
+
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(buffer, 0, MAX_LINELENGTH*MAX_SENSORS);
+
+ n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ n += sprintf ( buffer+n, "********************************************************\n");
+
+ if (ppc_rtas_find_all_sensors() != 0) {
+ n += sprintf ( buffer+n, "\nNo sensors are available\n");
+ goto return_string;
+ }
+
+ for (i=0; i<sensors.quant; i++) {
+ j = sensors.sensor[i].quant;
+ /* A sensor may have multiple instances; query each index from
+ * 0 up to and including quant. */
+ while (j >= 0) {
+ error = rtas_call(get_sensor_state, 2, 2, &ret,
+ sensors.sensor[i].token, sensors.sensor[i].quant-j);
+ state = (int) ret;
+ n += ppc_rtas_process_sensor(sensors.sensor[i], state, error, buffer+n );
+ n += sprintf (buffer+n, "\n");
+ j--;
+ } /* while */
+ } /* for */
+
+return_string:
+ /* Copy the requested window out of the assembled report; set *eof
+ * when the remainder fits in this read. */
+ if (off >= strlen(buffer)) {
+ *eof = 1;
+ kfree(buffer);
+ return 0;
+ }
+ if (n > strlen(buffer) - off)
+ n = strlen(buffer) - off;
+ if (n > count)
+ n = count;
+ else
+ *eof = 1;
+ memcpy(buf, buffer + off, n);
+ *start = buf;
+ kfree(buffer);
+ return n;
+}
+
+/* ****************************************************************** */
+
+/* Populate the global `sensors` table from the "rtas-sensors" device
+ * tree property. Returns 0 on success, 1 if the property is missing. */
+int ppc_rtas_find_all_sensors (void)
+{
+ /* The property is an array of 32-bit cells; reading it through an
+ * unsigned long * (8 bytes on ppc64) would merge each token/quant
+ * pair into one bogus value. */
+ unsigned int *utmp;
+ int len, i, j;
+
+ utmp = (unsigned int *) get_property(rtas_node, "rtas-sensors", &len);
+ if (utmp == NULL) {
+ printk (KERN_ERR "error: could not get rtas-sensors\n");
+ return 1;
+ }
+
+ /* Each sensor is a (token, quant) pair of 32-bit ints = 8 bytes. */
+ sensors.quant = len / 8; /* int + int */
+ /* Clamp to the fixed-size array so an oversized firmware property
+ * cannot overflow sensors.sensor[]. */
+ if (sensors.quant > MAX_SENSORS)
+ sensors.quant = MAX_SENSORS;
+
+ for (i=0, j=0; j<sensors.quant; i+=2, j++) {
+ sensors.sensor[j].token = utmp[i];
+ sensors.sensor[j].quant = utmp[i+1];
+ }
+ return 0;
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string of what rtas returned
+ */
+char * ppc_rtas_process_error(int error)
+{
+ /* Translate an RTAS/sensor status code into a short readable tag. */
+ if (error == SENSOR_CRITICAL_HIGH)
+ return "(critical high)";
+ if (error == SENSOR_WARNING_HIGH)
+ return "(warning high)";
+ if (error == SENSOR_NORMAL)
+ return "(normal)";
+ if (error == SENSOR_WARNING_LOW)
+ return "(warning low)";
+ if (error == SENSOR_CRITICAL_LOW)
+ return "(critical low)";
+ if (error == SENSOR_SUCCESS)
+ return "(read ok)";
+ if (error == SENSOR_HW_ERROR)
+ return "(hardware error)";
+ if (error == SENSOR_BUSY)
+ return "(busy)";
+ if (error == SENSOR_NOT_EXIST)
+ return "(non existant)";
+ if (error == SENSOR_DR_ENTITY)
+ return "(dr entity removed)";
+ /* Anything else is unexpected. */
+ return "(UNKNOWN)";
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string out of what the sensor said
+ */
+
+/*
+ * Render one sensor reading as a table row in buf: sensor name, value,
+ * condition string (via ppc_rtas_process_error) and location code (via
+ * get_location_code). Returns the number of characters written.
+ * NOTE(review): `state` indexes the string tables below without any
+ * range check -- an out-of-range value from firmware would read past
+ * the arrays; confirm RTAS guarantees the documented value ranges.
+ */
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf)
+{
+ /* Defined return values */
+ const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t", "Mainenance" };
+ const char * enclosure_switch[] = { "Closed", "Open" };
+ const char * lid_status[] = { " ", "Open", "Closed" };
+ const char * power_source[] = { "AC\t", "Battery", "AC & Battery" };
+ const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
+ const char * epow_sensor[] = {
+ "EPOW Reset", "Cooling warning", "Power warning",
+ "System shutdown", "System halt", "EPOW main enclosure",
+ "EPOW power off" };
+ const char * battery_cyclestate[] = { "None", "In progress", "Requested" };
+ const char * battery_charging[] = { "Charging", "Discharching", "No current flow" };
+ const char * ibm_drconnector[] = { "Empty", "Present" };
+ const char * ibm_intqueue[] = { "Disabled", "Enabled" };
+
+ int have_strings = 0; /* set when state was rendered from a table */
+ int temperature = 0; /* thermal sensor: print value in C and F */
+ int unknown = 0; /* unknown token: skip condition/location */
+ int n = 0;
+
+ /* What kind of sensor do we have here? */
+ switch (s.token) {
+ case KEY_SWITCH:
+ n += sprintf(buf+n, "Key switch:\t");
+ n += sprintf(buf+n, "%s\t", key_switch[state]);
+ have_strings = 1;
+ break;
+ case ENCLOSURE_SWITCH:
+ n += sprintf(buf+n, "Enclosure switch:\t");
+ n += sprintf(buf+n, "%s\t", enclosure_switch[state]);
+ have_strings = 1;
+ break;
+ case THERMAL_SENSOR:
+ n += sprintf(buf+n, "Temp. (°C/°F):\t");
+ temperature = 1;
+ break;
+ case LID_STATUS:
+ n += sprintf(buf+n, "Lid status:\t");
+ n += sprintf(buf+n, "%s\t", lid_status[state]);
+ have_strings = 1;
+ break;
+ case POWER_SOURCE:
+ n += sprintf(buf+n, "Power source:\t");
+ n += sprintf(buf+n, "%s\t", power_source[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_VOLTAGE:
+ n += sprintf(buf+n, "Battery voltage:\t");
+ break;
+ case BATTERY_REMAINING:
+ n += sprintf(buf+n, "Battery remaining:\t");
+ n += sprintf(buf+n, "%s\t", battery_remaining[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_PERCENTAGE:
+ n += sprintf(buf+n, "Battery percentage:\t");
+ break;
+ case EPOW_SENSOR:
+ n += sprintf(buf+n, "EPOW Sensor:\t");
+ n += sprintf(buf+n, "%s\t", epow_sensor[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CYCLESTATE:
+ n += sprintf(buf+n, "Battery cyclestate:\t");
+ n += sprintf(buf+n, "%s\t", battery_cyclestate[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CHARGING:
+ n += sprintf(buf+n, "Battery Charging:\t");
+ n += sprintf(buf+n, "%s\t", battery_charging[state]);
+ have_strings = 1;
+ break;
+ case IBM_SURVEILLANCE:
+ n += sprintf(buf+n, "Surveillance:\t");
+ break;
+ case IBM_FANRPM:
+ n += sprintf(buf+n, "Fan (rpm):\t");
+ break;
+ case IBM_VOLTAGE:
+ n += sprintf(buf+n, "Voltage (mv):\t");
+ break;
+ case IBM_DRCONNECTOR:
+ n += sprintf(buf+n, "DR connector:\t");
+ n += sprintf(buf+n, "%s\t", ibm_drconnector[state]);
+ have_strings = 1;
+ break;
+ case IBM_POWERSUPPLY:
+ n += sprintf(buf+n, "Powersupply:\t");
+ break;
+ case IBM_INTQUEUE:
+ n += sprintf(buf+n, "Interrupt queue:\t");
+ n += sprintf(buf+n, "%s\t", ibm_intqueue[state]);
+ have_strings = 1;
+ break;
+ default:
+ n += sprintf(buf+n, "Unkown sensor (type %d), ignoring it\n",
+ s.token);
+ unknown = 1;
+ have_strings = 1;
+ break;
+ }
+ /* Numeric sensors: print the raw state (plus Fahrenheit for temps). */
+ if (have_strings == 0) {
+ if (temperature) {
+ n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
+ } else
+ n += sprintf(buf+n, "%10d\t", state);
+ }
+ if (unknown == 0) {
+ n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
+ n += get_location_code(s, buf+n);
+ }
+ return n;
+}
+
+/* ****************************************************************** */
+
+/* Expand the single location-code letter at c[idx] (plus its following
+ * digit) into a readable name in buf. Returns characters written. */
+int check_location (char *c, int idx, char * buf)
+{
+ int n = 0;
+
+ switch (*(c+idx)) {
+ case LOC_PLANAR:
+ n += sprintf ( buf, "Planar #%c", *(c+idx+1));
+ break;
+ case LOC_CPU:
+ n += sprintf ( buf, "CPU #%c", *(c+idx+1));
+ break;
+ case LOC_FAN:
+ n += sprintf ( buf, "Fan #%c", *(c+idx+1));
+ break;
+ case LOC_RACKMOUNTED:
+ n += sprintf ( buf, "Rack #%c", *(c+idx+1));
+ break;
+ case LOC_VOLTAGE:
+ n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
+ break;
+ case LOC_LCD:
+ n += sprintf ( buf, "LCD #%c", *(c+idx+1));
+ break;
+ case '.':
+ n += sprintf ( buf, "- %c", *(c+idx+1));
+ /* The missing break here used to fall through into the
+ * default case and append "Unknown location" as well. */
+ break;
+ default:
+ n += sprintf ( buf, "Unknown location");
+ break;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+/*
+ * Format:
+ * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
+ * the '.' may be an abbreviation
+ */
+/* Walk a whole location-code string, expanding each letter (or '.')
+ * via check_location() and turning '/' and '-' separators into " at ".
+ * Returns the number of characters written to buf. */
+int check_location_string (char *c, char *buf)
+{
+ int written = 0;
+ int pos;
+
+ for (pos = 0; c[pos] != '\0'; pos++) {
+ if (isalpha(c[pos]) || c[pos] == '.')
+ written += check_location(c, pos, buf + written);
+ else if (c[pos] == '/' || c[pos] == '-')
+ written += sprintf(buf + written, " at ");
+ }
+ return written;
+}
+
+
+/* ****************************************************************** */
+
+/* Look up the "ibm,sensor-NNNN" location-code property for sensor s and
+ * append a readable form to buffer; returns characters written. The
+ * static `pos` walks through the property's concatenated strings across
+ * successive calls (one string per sensor instance). */
+int get_location_code(struct individual_sensor s, char * buffer)
+{
+ char rstr[512], tmp[10], tmp2[10];
+ int n=0, i=0, llen, len;
+ /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
+ char *ret;
+
+ static int pos = 0; /* remember position where buffer was */
+
+ /* construct the sensor number like 0003 */
+ /* fill with zeros */
+ n = sprintf(tmp, "%d", s.token);
+ len = strlen(tmp);
+ while (strlen(tmp) < 4)
+ n += sprintf (tmp+n, "0");
+
+ /* invert the string */
+ while (tmp[i]) {
+ if (i<len)
+ tmp2[4-len+i] = tmp[i];
+ else
+ tmp2[3-i] = tmp[i];
+ i++;
+ }
+ tmp2[4] = '\0';
+
+ sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
+
+ ret = (char *) get_property(rtas_node, rstr, &llen);
+
+ n=0;
+ /* get_property() returns NULL when the property is absent; the old
+ * code dereferenced it unconditionally and could oops. */
+ if (ret == NULL || ret[0] == '\0')
+ n += sprintf ( buffer+n, "--- ");/* does not have a location */
+ else {
+ char t[50];
+ ret += pos;
+
+ n += check_location_string(ret, buffer + n);
+ n += sprintf ( buffer+n, " ");
+ /* see how many characters we have printed */
+ sprintf ( t, "%s ", ret);
+
+ pos += strlen(t);
+ if (pos >= llen) pos=0;
+ }
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Frequency */
+/* ****************************************************************** */
+/* Parse a decimal tone frequency and pass it to RTAS set-indicator.
+ * The value is clamped at 0; errors from RTAS are logged only. */
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ long freq; /* signed: simple_strtoul made the < 0 check dead code */
+ char *dest;
+ int error;
+ freq = simple_strtol(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_freq_write: Invalid tone frequency\n");
+ return count;
+ }
+ /* Clamp negative input to 0 (now reachable with the signed parse). */
+ if (freq < 0) freq = 0;
+ rtas_tone_frequency = freq; /* save it for later */
+ error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
+ TONE_FREQUENCY, 0, freq);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+/* Report the last tone frequency written (default 1000). */
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_frequency);
+
+ /* Clamp to remaining data and count; advance *ppos for EOF. */
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Volume */
+/* ****************************************************************** */
+/* Parse a decimal tone volume, clamp it to 0..100 and pass it to RTAS
+ * set-indicator. Errors from RTAS are logged only. */
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ long volume; /* signed: simple_strtoul made the < 0 check dead code */
+ char *dest;
+ int error;
+ volume = simple_strtol(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
+ return count;
+ }
+ /* Clamp into the 0..100 percent range (low clamp now reachable). */
+ if (volume < 0) volume = 0;
+ if (volume > 100) volume = 100;
+
+ rtas_tone_volume = volume; /* save it for later */
+ error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
+ TONE_VOLUME, 0, volume);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone volume returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+/* Report the last tone volume written (default 0). */
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_volume);
+
+ /* Clamp to remaining data and count; advance *ppos for EOF. */
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
--- /dev/null
+/*
+ *
+ * Procedures for interfacing to the RTAS on CHRP machines.
+ *
+ * Peter Bergner, IBM March 2001.
+ * Copyright (C) 2001 IBM.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <asm/init.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/machdep.h>
+#include <asm/Paca.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/udbg.h>
+
+/*
+ * prom_init() is called very early on, before the kernel text
+ * and data have been mapped to KERNELBASE. At this point the code
+ * is running at whatever address it has been loaded at, so
+ * references to extern and static variables must be relocated
+ * explicitly. The procedure reloc_offset() returns the address
+ * we're currently running at minus the address we were linked at.
+ * (Note that strings count as static variables.)
+ *
+ * Because OF may have mapped I/O devices into the area starting at
+ * KERNELBASE, particularly on CHRP machines, we can't safely call
+ * OF once the kernel has been mapped to KERNELBASE. Therefore all
+ * OF calls should be done within prom_init(), and prom_init()
+ * and all routines called within it must be careful to relocate
+ * references as necessary.
+ *
+ * Note that the bss is cleared *after* prom_init runs, so we have
+ * to make sure that any static or extern variables it accesses
+ * are put in the data segment.
+ */
+
+/* Global RTAS state; only the spinlock needs a static initializer
+ * (it must be usable in the data segment before the bss is cleared). */
+struct rtas_t rtas = {
+ lock: SPIN_LOCK_UNLOCKED
+};
+
+extern unsigned long reloc_offset(void);
+
+/*
+ * Invoke an RTAS call while still running at the physical load address
+ * (before relocation to KERNELBASE), so all pointers must go through
+ * PTRRELOC. Variadic args after nret are the RTAS input arguments.
+ * NOTE(review): `offset` looks unused but is presumably consumed by the
+ * PTRRELOC macro -- verify before removing. Each argument is narrowed
+ * with LONG_LSW to its low 32 bits, per the RTAS calling convention.
+ */
+void
+phys_call_rtas(int token, int nargs, int nret, ...)
+{
+ va_list list;
+ unsigned long offset = reloc_offset();
+ struct rtas_args *rtas = PTRRELOC(&(get_paca()->xRtas));
+ int i;
+
+ rtas->token = token;
+ rtas->nargs = nargs;
+ rtas->nret = nret;
+ /* Return values live directly after the input args in args[]. */
+ rtas->rets = (rtas_arg_t *)PTRRELOC(&(rtas->args[nargs]));
+
+ va_start(list, nret);
+ for (i = 0; i < nargs; i++)
+ rtas->args[i] = (rtas_arg_t)LONG_LSW(va_arg(list, ulong));
+ va_end(list);
+
+ enter_rtas(rtas);
+}
+
+/*
+ * Pre-relocation variant of display-status: show character c on the
+ * operator panel. Token 10 is hard-coded (presumably the
+ * display-character token -- verify against the RTAS binding).
+ */
+void
+phys_call_rtas_display_status(char c)
+{
+ unsigned long offset = reloc_offset();
+ struct rtas_args *rtas = PTRRELOC(&(get_paca()->xRtas));
+
+ rtas->token = 10;
+ rtas->nargs = 1;
+ rtas->nret = 1;
+ rtas->rets = (rtas_arg_t *)PTRRELOC(&(rtas->args[1]));
+ rtas->args[0] = (int)c;
+
+ enter_rtas(rtas);
+}
+
+/*
+ * Post-relocation variant: same hard-coded token 10, but the argument
+ * block is passed to RTAS by its physical address (__pa).
+ */
+void
+call_rtas_display_status(char c)
+{
+	struct rtas_args *rtas = &(get_paca()->xRtas);
+
+	rtas->token = 10;
+	rtas->nargs = 1;
+	rtas->nret = 1;
+	rtas->rets = (rtas_arg_t *)&(rtas->args[1]);
+	rtas->args[0] = (int)c;
+
+	enter_rtas((void *)__pa((unsigned long)rtas));
+}
+
+#if 0
+#define DEBUG_RTAS
+#endif
+/*
+ * Look up the token for an RTAS service by name in the /rtas
+ * device-tree node.  Returns RTAS_UNKNOWN_SERVICE if the node or
+ * property is absent.
+ */
+__openfirmware
+int
+rtas_token(const char *service)
+{
+	int *tokp;
+	if (rtas.dev == NULL) {
+#ifdef DEBUG_RTAS
+		udbg_printf("\tNo rtas device in device-tree...\n");
+#endif /* DEBUG_RTAS */
+		return RTAS_UNKNOWN_SERVICE;
+	}
+	tokp = (int *) get_property(rtas.dev, service, NULL);
+	return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
+}
+
+/*
+ * Make an RTAS call after relocation.
+ *
+ * token:   service token from rtas_token()
+ * nargs:   number of input arguments (variadic, low 32 bits used)
+ * nret:    number of return values RTAS will produce
+ * outputs: if non-NULL, receives rets[1..nret-1] (rets[0] is the
+ *          status and is this function's return value)
+ *
+ * The per-cpu rtas_args block in the PACA is used, but a single
+ * global lock (rtas.lock) serializes all callers for now.
+ */
+__openfirmware
+long
+rtas_call(int token, int nargs, int nret,
+	  unsigned long *outputs, ...)
+{
+	va_list list;
+	int i;
+	unsigned long s;
+	struct rtas_args *rtas_args = &(get_paca()->xRtas);
+
+#ifdef DEBUG_RTAS
+	udbg_printf("Entering rtas_call\n");
+	udbg_printf("\ttoken    = 0x%x\n", token);
+	udbg_printf("\tnargs    = %d\n", nargs);
+	udbg_printf("\tnret     = %d\n", nret);
+	udbg_printf("\t&outputs = 0x%lx\n", outputs);
+#endif /* DEBUG_RTAS */
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -1;
+
+	rtas_args->token = token;
+	rtas_args->nargs = nargs;
+	rtas_args->nret  = nret;
+	/* Return values follow the input args in the same array. */
+	rtas_args->rets  = (rtas_arg_t *)&(rtas_args->args[nargs]);
+	va_start(list, outputs);
+	for (i = 0; i < nargs; ++i) {
+		/* RTAS is 32-bit: pass only the low word. */
+		rtas_args->args[i] = (rtas_arg_t)LONG_LSW(va_arg(list, ulong));
+#ifdef DEBUG_RTAS
+		udbg_printf("\tnarg[%d] = 0x%lx\n", i, rtas_args->args[i]);
+#endif /* DEBUG_RTAS */
+	}
+	va_end(list);
+
+	/* Clear the return slots so stale data can't leak through. */
+	for (i = 0; i < nret; ++i)
+		rtas_args->rets[i] = 0;
+
+#if 0 /* Gotta do something different here, use global lock for now... */
+	spin_lock_irqsave(&rtas_args->lock, s);
+#else
+	spin_lock_irqsave(&rtas.lock, s);
+#endif
+#ifdef DEBUG_RTAS
+	udbg_printf("\tentering rtas with 0x%lx\n", (void *)__pa((unsigned long)rtas_args));
+#endif /* DEBUG_RTAS */
+	/* RTAS wants the physical address of the argument block. */
+	enter_rtas((void *)__pa((unsigned long)rtas_args));
+#ifdef DEBUG_RTAS
+	udbg_printf("\treturned from rtas ...\n");
+#endif /* DEBUG_RTAS */
+#if 0 /* Gotta do something different here, use global lock for now... */
+	spin_unlock_irqrestore(&rtas_args->lock, s);
+#else
+	spin_unlock_irqrestore(&rtas.lock, s);
+#endif
+#ifdef DEBUG_RTAS
+	for(i=0; i < nret ;i++)
+		udbg_printf("\tnret[%d] = 0x%lx\n", i, (ulong)rtas_args->rets[i]);
+#endif /* DEBUG_RTAS */
+
+	/* Copy out the secondary return values, if requested. */
+	if (nret > 1 && outputs != NULL)
+		for (i = 0; i < nret-1; ++i)
+			outputs[i] = rtas_args->rets[i+1];
+	return (ulong)((nret > 0) ? rtas_args->rets[0] : 0);
+}
+
+/* Reboot via the RTAS system-reboot service; spins if RTAS returns. */
+void __chrp
+rtas_restart(char *cmd)
+{
+	printk("RTAS system-reboot returned %ld\n",
+	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
+	for (;;);
+}
+
+/* Power the machine off via RTAS; spins if RTAS returns. */
+void __chrp
+rtas_power_off(void)
+{
+	/* allow power on only with power button press */
+	printk("RTAS power-off returned %ld\n",
+	       rtas_call(rtas_token("power-off"), 2, 1, NULL,0xffffffff,0xffffffff));
+	for (;;);
+}
+
+/* Halt is implemented as power-off on RTAS machines. */
+void __chrp
+rtas_halt(void)
+{
+	rtas_power_off();
+}
--- /dev/null
+/*
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Communication to userspace based on kernel/printk.c
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/rtas.h>
+#include <asm/prom.h>
+
+#if 0
+#define DEBUG(A...) printk(KERN_ERR A)
+#else
+#define DEBUG(A...)
+#endif
+
+static spinlock_t rtas_log_lock = SPIN_LOCK_UNLOCKED;
+
+DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait);
+
+#define LOG_NUMBER 64 /* must be a power of two */
+#define LOG_NUMBER_MASK (LOG_NUMBER-1)
+
+static char *rtas_log_buf;
+static unsigned long rtas_log_start;
+static unsigned long rtas_log_size;
+
+static int surveillance_requested;
+static unsigned int rtas_event_scan_rate;
+static unsigned int rtas_error_log_max;
+
+#define EVENT_SCAN_ALL_EVENTS 0xf0000000
+#define SURVEILLANCE_TOKEN 9000
+#define SURVEILLANCE_TIMEOUT 1
+#define SURVEILLANCE_SCANRATE 1
+
+/*
+ * Since we use 32 bit RTAS, the physical address of this must be below
+ * 4G or else bad things happen. Allocate this in the kernel data and
+ * make it big enough.
+ */
+#define RTAS_ERROR_LOG_MAX 1024
+static unsigned char logdata[RTAS_ERROR_LOG_MAX];
+
+/* /proc/rtas/error_log open: no per-open state needed. */
+static int rtas_log_open(struct inode * inode, struct file * file)
+{
+	return 0;
+}
+
+/* /proc/rtas/error_log release: nothing to tear down. */
+static int rtas_log_release(struct inode * inode, struct file * file)
+{
+	return 0;
+}
+
+/*
+ * Read one RTAS error-log record.  The caller's buffer must hold at
+ * least rtas_error_log_max bytes; exactly one record is consumed per
+ * read.  Blocks (interruptibly) until a record is available.
+ *
+ * Fix: the copy_to_user() result was previously ignored, so a faulting
+ * user buffer would still consume a record and report success.  Now a
+ * failed copy returns -EFAULT (the record is still consumed, as the
+ * buffer was verified up front with verify_area).
+ */
+static ssize_t rtas_log_read(struct file * file, char * buf,
+			size_t count, loff_t *ppos)
+{
+	int error;
+	char *tmp;
+	unsigned long offset;
+
+	if (!buf || count < rtas_error_log_max)
+		return -EINVAL;
+
+	count = rtas_error_log_max;
+
+	error = verify_area(VERIFY_WRITE, buf, count);
+	if (error)
+		return -EINVAL;
+
+	/* Bounce buffer so we never copy_to_user under the log lock. */
+	tmp = kmalloc(rtas_error_log_max, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
+	if (error)
+		goto out;
+
+	spin_lock(&rtas_log_lock);
+	offset = rtas_error_log_max * (rtas_log_start & LOG_NUMBER_MASK);
+	memcpy(tmp, &rtas_log_buf[offset], count);
+	rtas_log_start += 1;
+	rtas_log_size -= 1;
+	spin_unlock(&rtas_log_lock);
+
+	if (copy_to_user(buf, tmp, count)) {
+		error = -EFAULT;
+		goto out;
+	}
+	error = count;
+
+out:
+	kfree(tmp);
+	return error;
+}
+
+/* poll(): readable whenever at least one record is buffered. */
+static unsigned int rtas_log_poll(struct file *file, poll_table * wait)
+{
+	poll_wait(file, &rtas_log_wait, wait);
+	if (rtas_log_size)
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+/* File operations for /proc/rtas/error_log. */
+struct file_operations proc_rtas_log_operations = {
+	read:		rtas_log_read,
+	poll:		rtas_log_poll,
+	open:		rtas_log_open,
+	release:	rtas_log_release,
+};
+
+/*
+ * Append one fixed-size event record to the circular log buffer and
+ * wake any readers.  When the ring is full the oldest record is
+ * overwritten (start advances instead of size).
+ */
+static void log_rtas(char *buf)
+{
+	unsigned long offset;
+
+	DEBUG("logging rtas event\n");
+
+	spin_lock(&rtas_log_lock);
+
+	offset = rtas_error_log_max *
+		((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK);
+
+	memcpy(&rtas_log_buf[offset], buf, rtas_error_log_max);
+
+	if (rtas_log_size < LOG_NUMBER)
+		rtas_log_size += 1;
+	else
+		rtas_log_start += 1;
+
+	spin_unlock(&rtas_log_lock);
+	wake_up_interruptible(&rtas_log_wait);
+}
+
+/*
+ * Turn on firmware surveillance via set-indicator and tighten the
+ * event-scan interval accordingly.  Returns 0 on success, -1 on
+ * failure.
+ */
+static int enable_surveillance(void)
+{
+	int error;
+
+	error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL, SURVEILLANCE_TOKEN,
+			  0, SURVEILLANCE_TIMEOUT);
+
+	if (error) {
+		printk(KERN_ERR "rtasd: could not enable surveillance\n");
+		return -1;
+	}
+
+	rtas_event_scan_rate = SURVEILLANCE_SCANRATE;
+
+	return 0;
+}
+
+/*
+ * Read the event-scan rate and maximum error-log size from the /rtas
+ * device-tree node.  The log size is clamped to our static buffer
+ * (RTAS_ERROR_LOG_MAX).  Returns 0 on success, -1 if a required
+ * property is missing.
+ */
+static int get_eventscan_parms(void)
+{
+	struct device_node *node;
+	int *ip;
+
+	node = find_path_device("/rtas");
+
+	ip = (int *)get_property(node, "rtas-event-scan-rate", NULL);
+	if (ip == NULL) {
+		printk(KERN_ERR "rtasd: no rtas-event-scan-rate\n");
+		return -1;
+	}
+	rtas_event_scan_rate = *ip;
+	DEBUG("rtas-event-scan-rate %d\n", rtas_event_scan_rate);
+
+	ip = (int *)get_property(node, "rtas-error-log-max", NULL);
+	if (ip == NULL) {
+		printk(KERN_ERR "rtasd: no rtas-error-log-max\n");
+		return -1;
+	}
+	rtas_error_log_max = *ip;
+	DEBUG("rtas-error-log-max %d\n", rtas_error_log_max);
+
+	if (rtas_error_log_max > RTAS_ERROR_LOG_MAX) {
+		printk(KERN_ERR "rtasd: truncated error log from %d to %d bytes\n", rtas_error_log_max, RTAS_ERROR_LOG_MAX);
+		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
+	}
+
+	return 0;
+}
+
+extern long sys_sched_get_priority_max(int policy);
+
+/*
+ * Kernel daemon: walks every cpu in turn, draining RTAS events with
+ * the event-scan call and feeding them into the circular log.  After
+ * the first full pass over all cpus it optionally enables firmware
+ * surveillance, then sleeps between scans.
+ *
+ * Fix: "sigfillset(&current->blocked)" had been corrupted to
+ * "sigfillset(¤t->blocked)" (the "&curren" run was mangled into an
+ * HTML entity), which does not compile.
+ */
+static int rtasd(void *unused)
+{
+	int cpu = 0;
+	int error;
+	int first_pass = 1;
+	int event_scan = rtas_token("event-scan");
+
+	if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1)
+		goto error;
+
+	rtas_log_buf = vmalloc(rtas_error_log_max*LOG_NUMBER);
+	if (!rtas_log_buf) {
+		printk(KERN_ERR "rtasd: no memory\n");
+		goto error;
+	}
+
+	DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);
+
+	/* Detach from the user context and ignore all signals. */
+	daemonize();
+	sigfillset(&current->blocked);
+	sprintf(current->comm, "rtasd");
+
+#if 0
+	/* Rusty unreal time task */
+	current->policy = SCHED_FIFO;
+	current->nice = sys_sched_get_priority_max(SCHED_FIFO) + 1;
+#endif
+
+	/* Pin ourselves to cpu 0 to start the round-robin scan. */
+	cpu = 0;
+	set_cpus_allowed(current, 1UL << cpu_logical_map(cpu));
+	schedule();
+
+	while(1) {
+		/* Drain all pending events on the current cpu. */
+		do {
+			memset(logdata, 0, rtas_error_log_max);
+			error = rtas_call(event_scan, 4, 1, NULL,
+					  EVENT_SCAN_ALL_EVENTS, 0,
+					  __pa(logdata), rtas_error_log_max);
+			if (error == -1) {
+				printk(KERN_ERR "event-scan failed\n");
+				break;
+			}
+
+			if (error == 0)
+				log_rtas(logdata);
+
+		} while(error == 0);
+
+		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());
+
+		cpu++;
+		if (cpu >= smp_num_cpus) {
+
+			/* First pass over every cpu is complete. */
+			if (first_pass && surveillance_requested) {
+				DEBUG("enabling surveillance\n");
+				if (enable_surveillance())
+					goto error_vfree;
+				DEBUG("surveillance enabled\n");
+			}
+
+			first_pass = 0;
+			cpu = 0;
+		}
+
+		set_cpus_allowed(current, 1UL << cpu_logical_map(cpu));
+
+
+		/* Check all cpus for pending events before sleeping*/
+		if (first_pass) {
+			schedule();
+		} else {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
+		}
+	}
+
+error_vfree:
+	vfree(rtas_log_buf);
+error:
+	/* Should delete proc entries */
+	return -EINVAL;
+}
+
+/*
+ * Create /proc/rtas/error_log and start the rtasd kernel thread.
+ *
+ * Fixes: this function is registered with __initcall, whose initcall
+ * type returns int, so declare it int and return 0.  The "daemon
+ * started" message was also printed unconditionally (even when
+ * kernel_thread failed) and at KERN_ERR severity; it now prints only
+ * on success, at KERN_INFO.
+ */
+static int __init rtas_init(void)
+{
+	struct proc_dir_entry *rtas_dir, *entry;
+
+	rtas_dir = proc_mkdir("rtas", 0);
+	if (!rtas_dir) {
+		printk(KERN_ERR "Failed to create rtas proc directory\n");
+	} else {
+		entry = create_proc_entry("error_log", S_IRUSR, rtas_dir);
+		if (entry)
+			entry->proc_fops = &proc_rtas_log_operations;
+		else
+			printk(KERN_ERR "Failed to create rtas/error_log proc entry\n");
+	}
+
+	if (kernel_thread(rtasd, 0, CLONE_FS) < 0)
+		printk(KERN_ERR "Failed to start RTAS daemon\n");
+	else
+		printk(KERN_INFO "RTAS daemon started\n");
+
+	return 0;
+}
+
+/*
+ * Parse the "surveillance=" boot option; "surveillance=1" requests
+ * that rtasd enable firmware surveillance after its first scan pass.
+ */
+static int __init surveillance_setup(char *str)
+{
+	int val;
+
+	if (get_option(&str, &val) && val == 1)
+		surveillance_requested = 1;
+
+	return 1;
+}
+
+__initcall(rtas_init);
+__setup("surveillance=", surveillance_setup);
--- /dev/null
+/*
+ * Real Time Clock interface for PPC64.
+ *
+ * Based on rtc.c by Paul Gortmaker
+ *
+ * This driver allows use of the real time clock
+ * from user space. It exports the /dev/rtc
+ * interface supporting various ioctl() and also the
+ * /proc/driver/rtc pseudo-file for status information.
+ *
+ * Interface does not support RTC interrupts nor an alarm.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 1.0 Mike Corrigan: IBM iSeries rtc support
+ * 1.1 Dave Engebretsen: IBM pSeries rtc support
+ */
+
+#define RTC_VERSION "1.1"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/mc146818rtc.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/time.h>
+
+#include <asm/iSeries/LparData.h>
+#include <asm/iSeries/mf.h>
+#include <asm/machdep.h>
+#include <asm/iSeries/ItSpCommArea.h>
+
+extern int piranha_simulator;
+
+/*
+ * We sponge a minor off of the misc major. No need slurping
+ * up another valuable major dev number for this. If you add
+ * an ioctl, make sure you don't conflict with SPARC's RTC
+ * ioctls.
+ */
+
+static loff_t rtc_llseek(struct file *file, loff_t offset, int origin);
+
+static ssize_t rtc_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
+
+static int rtc_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+static int rtc_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+
+/*
+ * If this driver ever becomes modularised, it will be really nice
+ * to make the epoch retain its value across module reload...
+ */
+
+static unsigned long epoch = 1900; /* year corresponding to 0x00 */
+
+static const unsigned char days_in_mo[] =
+{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+/*
+ * Now all the various file operations that we export.
+ */
+
+/* The RTC device is not seekable. */
+static loff_t rtc_llseek(struct file *file, loff_t offset, int origin)
+{
+	return -ESPIPE;
+}
+
+/* No interrupt/alarm support, so there is nothing to read(). */
+static ssize_t rtc_read(struct file *file, char *buf,
+			size_t count, loff_t *ppos)
+{
+	return -EIO;
+}
+
+/*
+ * RTC ioctl handler.  Supports:
+ *   RTC_RD_TIME    - read the hardware clock (copied out at the end)
+ *   RTC_SET_TIME   - validate and set the hardware clock (CAP_SYS_TIME)
+ *   RTC_EPOCH_READ - return the current epoch
+ *   RTC_EPOCH_SET  - change the epoch (>= 1900, CAP_SYS_TIME)
+ * Anything else is -EINVAL.  Only RTC_RD_TIME reaches the final
+ * copy_to_user of 'wtime'; all other cases return directly.
+ */
+static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+		     unsigned long arg)
+{
+	struct rtc_time wtime;
+
+	switch (cmd) {
+	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
+	{
+		ppc_md.get_rtc_time(&wtime);
+		break;
+	}
+	case RTC_SET_TIME:	/* Set the RTC */
+	{
+		struct rtc_time rtc_tm;
+		unsigned char mon, day, hrs, min, sec, leap_yr;
+		unsigned int yrs;
+
+		if (!capable(CAP_SYS_TIME))
+			return -EACCES;
+
+		if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
+				   sizeof(struct rtc_time)))
+			return -EFAULT;
+
+		yrs = rtc_tm.tm_year;
+		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
+		day = rtc_tm.tm_mday;
+		hrs = rtc_tm.tm_hour;
+		min = rtc_tm.tm_min;
+		sec = rtc_tm.tm_sec;
+
+		/* tm_year is years since 1900; reject pre-1970 dates. */
+		if (yrs < 70)
+			return -EINVAL;
+
+		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
+
+		if ((mon > 12) || (day == 0))
+			return -EINVAL;
+
+		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+			return -EINVAL;
+
+		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+			return -EINVAL;
+
+		/* Upper bound: year 2069 (1900 + 169). */
+		if ( yrs > 169 )
+			return -EINVAL;
+
+		ppc_md.set_rtc_time(&rtc_tm);
+
+		return 0;
+	}
+	case RTC_EPOCH_READ:	/* Read the epoch.	*/
+	{
+		return put_user (epoch, (unsigned long *)arg);
+	}
+	case RTC_EPOCH_SET:	/* Set the epoch.	*/
+	{
+		/*
+		 * There were no RTC clocks before 1900.
+		 */
+		if (arg < 1900)
+			return -EINVAL;
+
+		if (!capable(CAP_SYS_TIME))
+			return -EACCES;
+
+		epoch = arg;
+		return 0;
+	}
+	default:
+		return -EINVAL;
+	}
+	/* Only RTC_RD_TIME falls through to here. */
+	return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
+}
+
+/* /dev/rtc open: no exclusive-access tracking needed here. */
+static int rtc_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* /dev/rtc release: nothing to tear down. */
+static int rtc_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/*
+ * The various file operations we support.
+ */
+/*
+ * The various file operations we support.
+ */
+static struct file_operations rtc_fops = {
+	owner:		THIS_MODULE,
+	llseek:		rtc_llseek,
+	read:		rtc_read,
+	ioctl:		rtc_ioctl,
+	open:		rtc_open,
+	release:	rtc_release,
+};
+
+/* Misc device: /dev/rtc on the standard RTC minor. */
+static struct miscdevice rtc_dev=
+{
+	RTC_MINOR,
+	"rtc",
+	&rtc_fops
+};
+
+/*
+ * Register /dev/rtc and /proc/driver/rtc.
+ *
+ * Fix: the misc_register() return value was ignored, so a failed
+ * registration (e.g. minor already claimed) still reported success;
+ * now the error is propagated and no proc entry is left behind.
+ */
+static int __init rtc_init(void)
+{
+	int retval;
+
+	retval = misc_register(&rtc_dev);
+	if (retval)
+		return retval;
+
+	create_proc_read_entry ("driver/rtc", 0, 0, rtc_read_proc, NULL);
+
+	printk(KERN_INFO "i/pSeries Real Time Clock Driver v" RTC_VERSION "\n");
+
+	return 0;
+}
+
+/* Undo rtc_init: remove the proc entry and the misc device. */
+static void __exit rtc_exit (void)
+{
+	remove_proc_entry ("driver/rtc", NULL);
+	misc_deregister(&rtc_dev);
+}
+
+module_init(rtc_init);
+module_exit(rtc_exit);
+EXPORT_NO_SYMBOLS;
+
+/*
+ * Info exported via "/proc/driver/rtc".
+ */
+
+/*
+ * Format the current RTC state into buf for /proc/driver/rtc and
+ * return the number of bytes written.
+ */
+static int rtc_proc_output (char *buf)
+{
+	struct rtc_time tm;
+	char *out = buf;
+
+	ppc_md.get_rtc_time(&tm);
+
+	/*
+	 * There is no way to tell if the luser has the RTC set for local
+	 * time or for Universal Standard Time (GMT). Probably local though.
+	 */
+	out += sprintf(out,
+		       "rtc_time\t: %02d:%02d:%02d\n"
+		       "rtc_date\t: %04d-%02d-%02d\n"
+		       "rtc_epoch\t: %04lu\n",
+		       tm.tm_hour, tm.tm_min, tm.tm_sec,
+		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
+
+	out += sprintf(out,
+		       "DST_enable\t: no\n"
+		       "BCD\t\t: yes\n"
+		       "24hr\t\t: yes\n" );
+
+	return out - buf;
+}
+
+/*
+ * Standard proc read_proc shim: render the full report, then hand
+ * back the window [off, off+count) and set *eof once the tail of the
+ * report fits in this call.
+ */
+static int rtc_read_proc(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+	int len = rtc_proc_output(page);
+
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+	return len;
+}
+
+/*
+ * Get the RTC from the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+/*
+ * Get the RTC from the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
+{
+	/* No service processor under the simulator; leave *rtc_tm as-is. */
+	if (piranha_simulator)
+		return;
+
+	mf_getRtc(rtc_tm);
+	/* mf_getRtc returns a 1-based month; rtc_time wants 0-based. */
+	rtc_tm->tm_mon--;
+}
+
+
+/*
+ * Read the clock via the RTAS get-time-of-day service and convert the
+ * eight raw return words into a struct rtc_time.  On persistent
+ * failure *rtc_tm is left untouched.
+ */
+void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
+{
+	unsigned long ret[8];
+	int error;
+	int count;
+
+	/*
+	 * error -2 is clock busy, we keep retrying a few times to see
+	 * if it will come good -- paulus
+	 */
+	count = 0;
+	do {
+		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, (void *)&ret);
+	} while (error == -2 && ++count < 1000);
+
+	if (error != 0) {
+		printk(KERN_WARNING "error: reading the clock failed (%d)\n",
+		       error);
+		return;
+	}
+
+	/* ret[] layout per get-time-of-day: year, month, day, hour, min, sec. */
+	rtc_tm->tm_sec = ret[5];
+	rtc_tm->tm_min = ret[4];
+	rtc_tm->tm_hour = ret[3];
+	rtc_tm->tm_mday = ret[2];
+	rtc_tm->tm_mon = ret[1] - 1;
+	rtc_tm->tm_year = ret[0] - 1900;
+}
+
+/*
+ * Set the clock via the RTAS set-time-of-day service.
+ *
+ * NOTE(review): always returns 0, even when RTAS reports failure (a
+ * warning is printed instead) — confirm callers do not rely on the
+ * return value before changing this.
+ */
+int pSeries_set_rtc_time(struct rtc_time *tm)
+{
+	int error;
+	int count;
+
+	/*
+	 * error -2 is clock busy, we keep retrying a few times to see
+	 * if it will come good -- paulus
+	 */
+	count = 0;
+	do {
+		error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
+				  tm->tm_year + 1900, tm->tm_mon + 1,
+				  tm->tm_mday, tm->tm_hour, tm->tm_min,
+				  tm->tm_sec, 0);
+	} while (error == -2 && ++count < 1000);
+
+	if (error != 0)
+		printk(KERN_WARNING "error: setting the clock failed (%d)\n",
+		       error);
+
+	return 0;
+}
+
+/*
+ * Set the RTC in the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+/*
+ * Set the RTC in the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+int iSeries_set_rtc_time(struct rtc_time *tm)
+{
+	mf_setRtc(tm);
+	return 0;
+}
+
+/*
+ * Derive the current time from the BCD-encoded IPL-start timestamp
+ * left in the SP communication area, plus the jiffies elapsed since
+ * boot.  Under the piranha simulator *tm is left untouched.
+ */
+void iSeries_get_boot_time(struct rtc_time *tm)
+{
+	unsigned long time;
+	static unsigned long lastsec = 1;
+
+	/* Pick apart the two 32-bit words of the BCD IPL timestamp. */
+	u32 dataWord1 = *((u32 *)(&xSpCommArea.xBcdTimeAtIplStart));
+	u32 dataWord2 = *(((u32 *)&(xSpCommArea.xBcdTimeAtIplStart)) + 1);
+	int year = 1970;
+	int year1 = ( dataWord1 >> 24 ) & 0x000000FF;	/* century (BCD) */
+	int year2 = ( dataWord1 >> 16 ) & 0x000000FF;	/* year within century */
+	int sec = ( dataWord1 >> 8 ) & 0x000000FF;
+	int min = dataWord1 & 0x000000FF;
+	int hour = ( dataWord2 >> 24 ) & 0x000000FF;
+	int day = ( dataWord2 >> 8 ) & 0x000000FF;
+	int mon = dataWord2 & 0x000000FF;
+
+	if ( piranha_simulator )
+		return;
+
+	BCD_TO_BIN(sec);
+	BCD_TO_BIN(min);
+	BCD_TO_BIN(hour);
+	BCD_TO_BIN(day);
+	BCD_TO_BIN(mon);
+	BCD_TO_BIN(year1);
+	BCD_TO_BIN(year2);
+	year = year1 * 100 + year2;
+
+	time = mktime(year, mon, day, hour, min, sec);
+	time += ( jiffies / HZ );	/* add seconds since IPL */
+
+	/* Now THIS is a nasty hack!
+	* It ensures that the first two calls get different answers.
+	* That way the loop in init_time (time.c) will not think
+	* the clock is stuck.
+	*/
+	if ( lastsec ) {
+		time -= lastsec;
+		--lastsec;
+	}
+
+	to_tm(time, tm);
+	tm->tm_year -= 1900;
+	tm->tm_mon -= 1;
+}
--- /dev/null
+/*
+ *
+ *
+ * PowerPC-specific semaphore code.
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
+ * to eliminate the SMP races in the old version between the updates
+ * of `count' and `waking'. Now we use negative `count' values to
+ * indicate that some process(es) are waiting for the semaphore.
+ */
+
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ * old_count = sem->count;
+ * tmp = MAX(old_count, 0) + incr;
+ * sem->count = tmp;
+ * return old_count;
+ */
+/*
+ * Atomically update sem->count via lwarx/stwcx. and return its old
+ * value.  Equivalent to:
+ *
+ *	old_count = sem->count;
+ *	tmp = MAX(old_count, 0) + incr;
+ *	sem->count = tmp;
+ *	return old_count;
+ *
+ * The srawi/andc pair computes MAX(old_count, 0) branch-free: srawi
+ * produces the sign mask, andc clears the value when it was negative.
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+	int old_count, tmp;
+
+	__asm__ __volatile__("\n"
+"1:	lwarx	%0,0,%3\n"
+"	srawi	%1,%0,31\n"
+"	andc	%1,%0,%1\n"
+"	add	%1,%1,%4\n"
+"	stwcx.	%1,0,%3\n"
+"	bne	1b"
+	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+	: "r" (&sem->count), "r" (incr), "m" (sem->count)
+	: "cc");
+
+	return old_count;
+}
+
+/* Slow path of up(): fix up count and wake one sleeper. */
+void __up(struct semaphore *sem)
+{
+	/*
+	 * Note that we incremented count in up() before we came here,
+	 * but that was ineffective since the result was <= 0, and
+	 * any negative value of count is equivalent to 0.
+	 * This ends up setting count to 1, unless count is now > 0
+	 * (i.e. because some other cpu has called up() in the meantime),
+	 * in which case we just increment count.
+	 */
+	__sem_update_count(sem, 1);
+	wake_up(&sem->wait);
+}
+
+/*
+ * Note that when we come in to __down or __down_interruptible,
+ * we have already decremented count, but that decrement was
+ * ineffective since the result was < 0, and any negative value
+ * of count is equivalent to 0.
+ * Thus it is only when we decrement count from some value > 0
+ * that we have actually got the semaphore.
+ */
+/* Slow path of down(): sleep uninterruptibly until we get the semaphore. */
+void __down(struct semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+	/* Order the queue insertion before the count probe below. */
+	smp_wmb();
+
+	/*
+	 * Try to get the semaphore.  If the count is > 0, then we've
+	 * got the semaphore; we decrement count and exit the loop.
+	 * If the count is 0 or negative, we set it to -1, indicating
+	 * that we are asleep, and then sleep.
+	 */
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	/*
+	 * If there are any more sleepers, wake one of them up so
+	 * that it can either get the semaphore, or set count to -1
+	 * indicating that there are still processes sleeping.
+	 */
+	wake_up(&sem->wait);
+}
+
+/*
+ * Slow path of down_interruptible(): like __down(), but a pending
+ * signal aborts the wait and returns -EINTR (0 on success).
+ */
+int __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	tsk->state = TASK_INTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+	/* Order the queue insertion before the count probe below. */
+	smp_wmb();
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			/*
+			 * A signal is pending - give up trying.
+			 * Set sem->count to 0 if it is negative,
+			 * since we are no longer sleeping.
+			 */
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		tsk->state = TASK_INTERRUPTIBLE;
+	}
+	tsk->state = TASK_RUNNING;
+	remove_wait_queue(&sem->wait, &wait);
+	wake_up(&sem->wait);
+	return retval;
+}
--- /dev/null
+/*
+ *
+ * Common boot and setup code.
+ *
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/blk.h>
+#include <linux/ide.h>
+#include <linux/seq_file.h>
+#include <linux/ioport.h>
+#include <linux/tty.h>
+#include <asm/init.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/bootinfo.h>
+#include <asm/smp.h>
+#include <asm/elf.h>
+#include <asm/machdep.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/Naca.h>
+#include <asm/Paca.h>
+#include <asm/ppcdebug.h>
+#include <asm/time.h>
+
+extern unsigned long klimit;
+/* extern void *stab; */
+extern HTAB htab_data;
+extern unsigned long loops_per_jiffy;
+
+extern unsigned long embedded_sysmap_start;
+extern unsigned long embedded_sysmap_end;
+
+int have_of = 1;
+
+extern void chrp_init(unsigned long r3,
+ unsigned long r4,
+ unsigned long r5,
+ unsigned long r6,
+ unsigned long r7);
+
+extern void chrp_init_map_io_space( void );
+extern void iSeries_init( void );
+extern void iSeries_init_early( void );
+extern void pSeries_init_early( void );
+extern void pSeriesLP_init_early(void);
+extern void mm_init_ppc64( void );
+
+unsigned long decr_overclock = 1;
+unsigned long decr_overclock_proc0 = 1;
+unsigned long decr_overclock_set = 0;
+unsigned long decr_overclock_proc0_set = 0;
+
+#ifdef CONFIG_XMON
+extern void xmon_map_scc(void);
+#endif
+
+char saved_command_line[256];
+unsigned char aux_device_present;
+
+void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7);
+int parse_bootinfo(void);
+
+int _machine = _MACH_unknown;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+unsigned long SYSRQ_KEY;
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+struct machdep_calls ppc_md;
+struct Naca *naca;
+
+/*
+ * Perhaps we can put the pmac screen_info[] here
+ * on pmac as well so we don't need the ifdef's.
+ * Until we get multiple-console support in here
+ * that is. -- Cort
+ * Maybe tie it to serial consoles, since this is really what
+ * these processors use on existing boards. -- Dan
+ */
+struct screen_info screen_info = {
+ 0, 25, /* orig-x, orig-y */
+ 0, /* unused */
+ 0, /* orig-video-page */
+ 0, /* orig-video-mode */
+ 80, /* orig-video-cols */
+ 0,0,0, /* ega_ax, ega_bx, ega_cx */
+ 25, /* orig-video-lines */
+ 1, /* orig-video-isVGA */
+ 16 /* orig-video-points */
+};
+
+/*
+ * These are used in binfmt_elf.c to put aux entries on the stack
+ * for each elf executable being started.
+ */
+int dcache_bsize;
+int icache_bsize;
+int ucache_bsize;
+
+/*
+ * Initialize the PPCDBG state. Called before relocation has been enabled.
+ */
+/*
+ * Set the default PPCDBG debug switch in the NACA.  Runs before
+ * relocation, so the naca pointer itself must go through RELOC.
+ */
+void ppcdbg_initialize(void) {
+	unsigned long offset = reloc_offset();	/* consumed by RELOC */
+	struct Naca *_naca = RELOC(naca);
+
+	_naca->debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
+}
+
+/*
+ * Initialize a set of PACA's, one for each processor.
+ *
+ * At this point, relocation is on, but we have not done any other
+ * setup of the mm subsystem.
+ */
+/*
+ * Initialize a set of PACA's, one for each processor.
+ *
+ * At this point, relocation is on, but we have not done any other
+ * setup of the mm subsystem.
+ *
+ * Currently a no-op: the allocation scheme below is disabled (#if 0)
+ * and presumably superseded elsewhere — TODO confirm before removing.
+ */
+void paca_init(void) {
+#if 0
+	int processorCount = naca->processorCount, i;
+	struct Paca *paca[];
+
+	/* Put the array of paca's on a page boundary & allocate 1/2 page of */
+	/* storage for each.                                                 */
+	klimit += (PAGE_SIZE-1) & PAGE_MASK;
+	naca->xPaca = paca[0] = klimit;
+	klimit += ((PAGE_SIZE>>1) * processorCount);
+
+	for(i=0; i<processorCount; i++) {
+		paca[0]->xPacaIndex = i;
+	}
+#endif
+}
+
+/*
+ * Do some initial setup of the system. The parameters are those which
+ * were passed in from the bootloader.
+ */
+/*
+ * Early platform bring-up, in order: identify the machine type,
+ * run the platform's *_init_early hook, dump the NACA/HTAB state via
+ * udbg, finish the device tree and run chrp_init on pSeries, bring up
+ * the mm subsystem, then run the platform's late init / command-line
+ * parsing.  r3..r7 are the raw bootloader register arguments.
+ */
+void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
+		  unsigned long r6, unsigned long r7)
+{
+	/* This should be fixed properly in kernel/resource.c */
+	iomem_resource.end = MEM_SPACE_LIMIT;
+
+	/* pSeries systems are identified in prom.c via OF. */
+	if ( itLpNaca.xLparInstalled == 1 )
+		_machine = _MACH_iSeries;
+	switch (_machine) {
+	case _MACH_iSeries:
+		iSeries_init_early();
+		break;
+
+#ifdef CONFIG_PPC_PSERIES
+	case _MACH_pSeries:
+		pSeries_init_early();
+#ifdef CONFIG_BLK_DEV_INITRD
+		initrd_start = initrd_end = 0;
+#endif
+		parse_bootinfo();
+		break;
+
+	case _MACH_pSeriesLP:
+		pSeriesLP_init_early();
+#ifdef CONFIG_BLK_DEV_INITRD
+		initrd_start = initrd_end = 0;
+#endif
+		parse_bootinfo();
+		break;
+#endif
+	}
+
+	/* Dump the NACA fields to the early debug console. */
+	udbg_puts("\n-----------------------------------------------------\n");
+	udbg_puts("Naca Info...\n\n");
+	udbg_puts("naca                       = 0x");
+	udbg_puthex((unsigned long)naca);
+	udbg_putc('\n');
+
+	udbg_puts("naca->processorCount       = 0x");
+	udbg_puthex(naca->processorCount);
+	udbg_putc('\n');
+
+	udbg_puts("naca->physicalMemorySize   = 0x");
+	udbg_puthex(naca->physicalMemorySize);
+	udbg_putc('\n');
+
+	udbg_puts("naca->dCacheL1LineSize     = 0x");
+	udbg_puthex(naca->dCacheL1LineSize);
+	udbg_putc('\n');
+
+	udbg_puts("naca->dCacheL1LogLineSize  = 0x");
+	udbg_puthex(naca->dCacheL1LogLineSize);
+	udbg_putc('\n');
+
+	udbg_puts("naca->dCacheL1LinesPerPage = 0x");
+	udbg_puthex(naca->dCacheL1LinesPerPage);
+	udbg_putc('\n');
+
+	udbg_puts("naca->iCacheL1LineSize     = 0x");
+	udbg_puthex(naca->iCacheL1LineSize);
+	udbg_putc('\n');
+
+	udbg_puts("naca->iCacheL1LogLineSize  = 0x");
+	udbg_puthex(naca->iCacheL1LogLineSize);
+	udbg_putc('\n');
+
+	udbg_puts("naca->iCacheL1LinesPerPage = 0x");
+	udbg_puthex(naca->iCacheL1LinesPerPage);
+	udbg_putc('\n');
+
+	udbg_puts("naca->pftSize              = 0x");
+	udbg_puthex(naca->pftSize);
+	udbg_putc('\n');
+
+	udbg_puts("naca->serialPortAddr       = 0x");
+	udbg_puthex(naca->serialPortAddr);
+	udbg_putc('\n');
+
+	udbg_puts("naca->interrupt_controller = 0x");
+	udbg_puthex(naca->interrupt_controller);
+	udbg_putc('\n');
+
+	udbg_printf("\nHTAB Info ...\n\n");
+	udbg_puts("htab_data.htab             = 0x");
+	udbg_puthex((unsigned long)htab_data.htab);
+	udbg_putc('\n');
+	udbg_puts("htab_data.num_ptegs        = 0x");
+	udbg_puthex(htab_data.htab_num_ptegs);
+	udbg_putc('\n');
+
+	udbg_puts("\n-----------------------------------------------------\n");
+
+
+	/* NOTE(review): bitwise test — presumably _MACH_pSeries/_MACH_pSeriesLP
+	 * share a flag bit so both take this path; confirm the _MACH_ values. */
+	if ( _machine & _MACH_pSeries ) {
+		finish_device_tree();
+		chrp_init(r3, r4, r5, r6, r7);
+	}
+
+	mm_init_ppc64();
+
+	switch (_machine) {
+	case _MACH_iSeries:
+		iSeries_init();
+		break;
+	default:
+		/* The following relies on the device tree being */
+		/* fully configured.                             */
+		parse_cmd_line(r3, r4, r5, r6, r7);
+	}
+}
+
+/* Generic entry point: delegate restart to the platform hook. */
+void machine_restart(char *cmd)
+{
+	ppc_md.restart(cmd);
+}
+
+/* Generic entry point: delegate power-off to the platform hook. */
+void machine_power_off(void)
+{
+	ppc_md.power_off();
+}
+
+/* Generic entry point: delegate halt to the platform hook. */
+void machine_halt(void)
+{
+	ppc_md.halt();
+}
+
+/*
+ * seq_file show() callback for /proc/cpuinfo.  v encodes cpu index+1
+ * (see c_start); index NR_CPUS is the SMP summary record.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	unsigned long cpu_id = (unsigned long)v - 1;
+	unsigned int pvr;
+	unsigned short maj;
+	unsigned short min;
+
+#ifdef CONFIG_SMP
+	/* Final record: totals and the platform's extra info. */
+	if (cpu_id == NR_CPUS) {
+		unsigned long bogosum = smp_num_cpus * loops_per_jiffy;
+		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+			   bogosum/(500000/HZ),
+			   bogosum/(5000/HZ) % 100);
+
+		if (ppc_md.get_cpuinfo != NULL)
+			ppc_md.get_cpuinfo(m);
+
+		return 0;
+	}
+
+	/* Skip holes in the online map. */
+	if (!(cpu_online_map & (1<<cpu_id)))
+		return 0;
+#endif
+
+	/* Processor version register: major/minor revision in low bytes. */
+	pvr = xPaca[cpu_id].pvr;
+	maj = (pvr >> 8) & 0xFF;
+	min = pvr & 0xFF;
+
+	seq_printf(m, "processor\t: %lu\n", cpu_id);
+	seq_printf(m, "cpu\t\t: ");
+
+	pvr = xPaca[cpu_id].pvr;
+
+	switch (PVR_VER(pvr)) {
+	case PV_PULSAR:
+		seq_printf(m, "RS64-III (pulsar)\n");
+		break;
+	case PV_POWER4:
+		seq_printf(m, "POWER4 (gp)\n");
+		break;
+	case PV_ICESTAR:
+		seq_printf(m, "RS64-III (icestar)\n");
+		break;
+	case PV_SSTAR:
+		seq_printf(m, "RS64-IV (sstar)\n");
+		break;
+	case PV_630:
+		seq_printf(m, "POWER3 (630)\n");
+		break;
+	case PV_630p:
+		seq_printf(m, "POWER3 (630+)\n");
+		break;
+	default:
+		seq_printf(m, "Unknown (%08x)\n", pvr);
+		break;
+	}
+
+	/*
+	 * Assume here that all clock rates are the same in a
+	 * smp system.  -- Cort
+	 */
+	if (_machine != _MACH_iSeries) {
+		struct device_node *cpu_node;
+		int *fp;
+
+		cpu_node = find_type_devices("cpu");
+		if (cpu_node) {
+			fp = (int *) get_property(cpu_node, "clock-frequency",
+						  NULL);
+			if (fp)
+				seq_printf(m, "clock\t\t: %dMHz\n",
+					   *fp / 1000000);
+		}
+	}
+
+	if (ppc_md.setup_residual != NULL)
+		ppc_md.setup_residual(m, cpu_id);
+
+	seq_printf(m, "revision\t: %hd.%hd\n", maj, min);
+
+	seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
+		   loops_per_jiffy/(500000/HZ),
+		   loops_per_jiffy/(5000/HZ) % 100);
+
+	return 0;
+}
+
+/*
+ * seq_file start(): positions 0..NR_CPUS are valid (NR_CPUS is the
+ * summary record).  Encode position+1 so position 0 is non-NULL.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos > NR_CPUS)
+		return NULL;
+	return (void *)((*pos) + 1);
+}
+/* seq_file next(): advance to the following cpu record. */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+/* seq_file stop(): nothing to release. */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+/* seq_file operations backing /proc/cpuinfo. */
+struct seq_operations cpuinfo_op = {
+	start:	c_start,
+	next:	c_next,
+	stop:	c_stop,
+	show:	show_cpuinfo,
+};
+
+/*
+ * Fetch the kernel command line from the Open Firmware /chosen node,
+ * pick up an initrd handed over in r3/r4, and honor any "mem=" limit.
+ *
+ * Fix: ppc_md.progress() was called unconditionally; ppc_md is filled
+ * in per-platform, so guard against a NULL hook (the idiom used for
+ * the other ppc_md callbacks in this file, e.g. get_cpuinfo).
+ */
+void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
+		    unsigned long r6, unsigned long r7)
+{
+	struct device_node *chosen;
+	char *p;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* r3/r4 = initrd base/size from the bootloader, if sane. */
+	if ((initrd_start == 0) && r3 && r4 && r4 != 0xdeadbeef) {
+		initrd_start = (r3 >= KERNELBASE) ? r3 : (unsigned long)__va(r3);
+		initrd_end = initrd_start + r4;
+		ROOT_DEV = mk_kdev(RAMDISK_MAJOR, 0);
+		initrd_below_start_ok = 1;
+	}
+#endif
+
+	cmd_line[0] = 0;
+	chosen = find_devices("chosen");
+	if (chosen != NULL) {
+		p = get_property(chosen, "bootargs", NULL);
+		if (p != NULL)
+			strncpy(cmd_line, p, sizeof(cmd_line));
+	}
+	/* strncpy does not guarantee termination. */
+	cmd_line[sizeof(cmd_line) - 1] = 0;
+
+	/* Look for mem= option on command line */
+	if (strstr(cmd_line, "mem=")) {
+		char *p, *q;
+		unsigned long maxmem = 0;
+		extern unsigned long __max_memory;
+
+		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
+			q = p + 4;
+			/* Require "mem=" at a word boundary. */
+			if (p > cmd_line && p[-1] != ' ')
+				continue;
+			maxmem = simple_strtoul(q, &q, 0);
+			if (*q == 'k' || *q == 'K') {
+				maxmem <<= 10;
+				++q;
+			} else if (*q == 'm' || *q == 'M') {
+				maxmem <<= 20;
+				++q;
+			}
+		}
+		__max_memory = maxmem;
+	}
+	if (ppc_md.progress)
+		ppc_md.progress("id mach: done", 0x200);
+}
+
+
+/* Translate a bootinfo record tag into its symbolic name for messages. */
+char *bi_tag2str(unsigned long tag)
+{
+ static const struct {
+ unsigned long tag;
+ char *name;
+ } tag_names[] = {
+ { BI_FIRST, "BI_FIRST" },
+ { BI_LAST, "BI_LAST" },
+ { BI_CMD_LINE, "BI_CMD_LINE" },
+ { BI_BOOTLOADER_ID, "BI_BOOTLOADER_ID" },
+ { BI_INITRD, "BI_INITRD" },
+ { BI_SYSMAP, "BI_SYSMAP" },
+ { BI_MACHTYPE, "BI_MACHTYPE" },
+ };
+ unsigned int i;
+
+ for (i = 0; i < sizeof(tag_names)/sizeof(tag_names[0]); ++i)
+ if (tag_names[i].tag == tag)
+ return tag_names[i].name;
+ return "BI_UNKNOWN";
+}
+
+/*
+ * Walk the bootinfo records handed over by the bootloader (prom.bi_recs)
+ * and absorb the ones we understand: command line, System.map location
+ * and initrd location. Returns -1 if no valid record chain is present,
+ * 0 otherwise.
+ */
+int parse_bootinfo(void)
+{
+ struct bi_record *rec;
+ extern char *sysmap;
+ extern unsigned long sysmap_size;
+
+ rec = prom.bi_recs;
+
+ /* A valid chain must begin with a BI_FIRST record. */
+ if ( rec == NULL || rec->tag != BI_FIRST )
+ return -1;
+
+ for ( ; rec->tag != BI_LAST ; rec = bi_rec_next(rec) ) {
+ switch (rec->tag) {
+ case BI_CMD_LINE:
+ memcpy(cmd_line, (void *)rec->data, rec->size);
+ break;
+ case BI_SYSMAP:
+ /* data[0] may be physical or virtual; normalize to virtual. */
+ sysmap = (char *)((rec->data[0] >= (KERNELBASE))
+ ? rec->data[0] : (unsigned long)__va(rec->data[0]));
+ sysmap_size = rec->data[1];
+ break;
+#ifdef CONFIG_BLK_DEV_INITRD
+ case BI_INITRD:
+ initrd_start = (unsigned long)__va(rec->data[0]);
+ initrd_end = initrd_start + rec->data[1];
+ ROOT_DEV = mk_kdev(RAMDISK_MAJOR, 0);
+ initrd_below_start_ok = 1;
+ break;
+#endif /* CONFIG_BLK_DEV_INITRD */
+ }
+ }
+
+ return 0;
+}
+
+/* Generic ppc64 arch initcall: clear the boot progress display and run
+ * the platform-specific init hook, if one is installed. */
+int __init ppc_init(void)
+{
+ /* clear the progress line */
+ ppc_md.progress(" ", 0xffff);
+
+ if (ppc_md.init != NULL) {
+ ppc_md.init();
+ }
+ return 0;
+}
+
+arch_initcall(ppc_init);
+
+/*
+ * Replacement for the generic delay-loop calibration: the timebase tick
+ * rate is already known, so loops_per_jiffy is derived from it directly
+ * instead of being measured.
+ */
+void __init ppc64_calibrate_delay(void)
+{
+ loops_per_jiffy = tb_ticks_per_jiffy;
+
+ printk("Calibrating delay loop... %lu.%02lu BogoMips\n",
+ loops_per_jiffy/(500000/HZ),
+ loops_per_jiffy/(5000/HZ) % 100);
+
+}
+
+/* Hook used by start_kernel to pick up our calibrate routine. */
+extern void (*calibrate_delay)(void);
+
+/*
+ * Called into from start_kernel, after lock_kernel has been called.
+ * Initializes bootmem, which is used to manage page allocation until
+ * mem_init is called.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ extern int panic_timeout;
+ extern char _etext[], _edata[];
+ extern void do_init_bootmem(void);
+
+ calibrate_delay = ppc64_calibrate_delay;
+
+#ifdef CONFIG_XMON
+ xmon_map_scc();
+ /* Drop into the xmon debugger now if "xmon" is on the command line. */
+ if (strstr(cmd_line, "xmon"))
+ xmon(0);
+#endif /* CONFIG_XMON */
+
+ ppc_md.progress("setup_arch:enter", 0x3eab);
+
+#if defined(CONFIG_KGDB)
+ kgdb_map_scc();
+ set_debug_traps();
+ breakpoint();
+#endif
+ /*
+ * Set cache line size based on type of cpu as a default.
+ * Systems with OF can look in the properties on the cpu node(s)
+ * for a possibly more accurate value.
+ */
+ dcache_bsize = naca->dCacheL1LineSize;
+ icache_bsize = naca->iCacheL1LineSize;
+
+ /* reboot on panic */
+ panic_timeout = 180;
+
+ init_mm.start_code = PAGE_OFFSET;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) klimit;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ strcpy(saved_command_line, cmd_line);
+ *cmdline_p = cmd_line;
+
+ /* set up the bootmem stuff with available memory */
+ do_init_bootmem();
+ ppc_md.progress("setup_arch:bootmem", 0x3eab);
+
+ ppc_md.setup_arch();
+
+ paging_init();
+ ppc_md.progress("setup_arch: exit", 0x3eab);
+}
+
+#ifdef CONFIG_IDE
+
+/* Convert the shorts/longs in hd_driveid from little to big endian;
+ * chars are endian independent, of course, but strings need to be flipped.
+ * (Despite what it says in drivers/block/ide.h, they come up as little
+ * endian...)
+ *
+ * Changes to linux/hdreg.h may require changes here. */
+/* Byte-swap every multi-byte field of a hd_driveid in place. ASCII
+ * string fields (serial_no, fw_rev, model) are swapped 16 bits at a
+ * time to restore their byte order. */
+void ppc64_ide_fix_driveid(struct hd_driveid *id)
+{
+ int i;
+ unsigned short *stringcast;
+
+ id->config = __le16_to_cpu(id->config);
+ id->cyls = __le16_to_cpu(id->cyls);
+ id->reserved2 = __le16_to_cpu(id->reserved2);
+ id->heads = __le16_to_cpu(id->heads);
+ id->track_bytes = __le16_to_cpu(id->track_bytes);
+ id->sector_bytes = __le16_to_cpu(id->sector_bytes);
+ id->sectors = __le16_to_cpu(id->sectors);
+ id->vendor0 = __le16_to_cpu(id->vendor0);
+ id->vendor1 = __le16_to_cpu(id->vendor1);
+ id->vendor2 = __le16_to_cpu(id->vendor2);
+ /* serial_no is 20 bytes of ASCII, swapped as 10 shorts. */
+ stringcast = (unsigned short *)&id->serial_no[0];
+ for (i = 0; i < (20/2); i++)
+ stringcast[i] = __le16_to_cpu(stringcast[i]);
+ id->buf_type = __le16_to_cpu(id->buf_type);
+ id->buf_size = __le16_to_cpu(id->buf_size);
+ id->ecc_bytes = __le16_to_cpu(id->ecc_bytes);
+ stringcast = (unsigned short *)&id->fw_rev[0];
+ for (i = 0; i < (8/2); i++)
+ stringcast[i] = __le16_to_cpu(stringcast[i]);
+ stringcast = (unsigned short *)&id->model[0];
+ for (i = 0; i < (40/2); i++)
+ stringcast[i] = __le16_to_cpu(stringcast[i]);
+ id->dword_io = __le16_to_cpu(id->dword_io);
+ id->reserved50 = __le16_to_cpu(id->reserved50);
+ id->field_valid = __le16_to_cpu(id->field_valid);
+ id->cur_cyls = __le16_to_cpu(id->cur_cyls);
+ id->cur_heads = __le16_to_cpu(id->cur_heads);
+ id->cur_sectors = __le16_to_cpu(id->cur_sectors);
+ id->cur_capacity0 = __le16_to_cpu(id->cur_capacity0);
+ id->cur_capacity1 = __le16_to_cpu(id->cur_capacity1);
+ /* lba_capacity is the lone 32-bit field in this block. */
+ id->lba_capacity = __le32_to_cpu(id->lba_capacity);
+ id->dma_1word = __le16_to_cpu(id->dma_1word);
+ id->dma_mword = __le16_to_cpu(id->dma_mword);
+ id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes);
+ id->eide_dma_min = __le16_to_cpu(id->eide_dma_min);
+ id->eide_dma_time = __le16_to_cpu(id->eide_dma_time);
+ id->eide_pio = __le16_to_cpu(id->eide_pio);
+ id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy);
+ for (i = 0; i < 2; i++)
+ id->words69_70[i] = __le16_to_cpu(id->words69_70[i]);
+ for (i = 0; i < 4; i++)
+ id->words71_74[i] = __le16_to_cpu(id->words71_74[i]);
+ id->queue_depth = __le16_to_cpu(id->queue_depth);
+ for (i = 0; i < 4; i++)
+ id->words76_79[i] = __le16_to_cpu(id->words76_79[i]);
+ id->major_rev_num = __le16_to_cpu(id->major_rev_num);
+ id->minor_rev_num = __le16_to_cpu(id->minor_rev_num);
+ id->command_set_1 = __le16_to_cpu(id->command_set_1);
+ id->command_set_2 = __le16_to_cpu(id->command_set_2);
+ id->cfsse = __le16_to_cpu(id->cfsse);
+ id->cfs_enable_1 = __le16_to_cpu(id->cfs_enable_1);
+ id->cfs_enable_2 = __le16_to_cpu(id->cfs_enable_2);
+ id->csf_default = __le16_to_cpu(id->csf_default);
+ id->dma_ultra = __le16_to_cpu(id->dma_ultra);
+ id->word89 = __le16_to_cpu(id->word89);
+ id->word90 = __le16_to_cpu(id->word90);
+ id->CurAPMvalues = __le16_to_cpu(id->CurAPMvalues);
+ id->word92 = __le16_to_cpu(id->word92);
+ id->hw_config = __le16_to_cpu(id->hw_config);
+ for (i = 0; i < 32; i++)
+ id->words94_125[i] = __le16_to_cpu(id->words94_125[i]);
+ id->last_lun = __le16_to_cpu(id->last_lun);
+ id->word127 = __le16_to_cpu(id->word127);
+ id->dlf = __le16_to_cpu(id->dlf);
+ id->csfo = __le16_to_cpu(id->csfo);
+ for (i = 0; i < 26; i++)
+ id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
+ id->word156 = __le16_to_cpu(id->word156);
+ for (i = 0; i < 3; i++)
+ id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
+ for (i = 0; i < 96; i++)
+ id->words160_255[i] = __le16_to_cpu(id->words160_255[i]);
+}
+#endif
+
+
+/*
+ * Low-level exception debug helper: snapshot LR, SRR0/SRR1 (SPRs 0x1a
+ * and 0x1b) and r1/r20/r21, then dump them on the debug console. The
+ * asm reads must stay at the top so the registers are sampled before
+ * the rest of the function can clobber them.
+ */
+void exception_trace(unsigned long trap)
+{
+ unsigned long x, srr0, srr1, reg20, reg1, reg21;
+
+ asm("mflr %0" : "=r" (x) :);
+ asm("mfspr %0,0x1a" : "=r" (srr0) :);
+ asm("mfspr %0,0x1b" : "=r" (srr1) :);
+ asm("mr %0,1" : "=r" (reg1) :);
+ asm("mr %0,20" : "=r" (reg20) :);
+ asm("mr %0,21" : "=r" (reg21) :);
+
+ udbg_puts("\n");
+ udbg_puts("Took an exception : "); udbg_puthex(x); udbg_puts("\n");
+ udbg_puts(" "); udbg_puthex(reg1); udbg_puts("\n");
+ udbg_puts(" "); udbg_puthex(reg20); udbg_puts("\n");
+ udbg_puts(" "); udbg_puthex(reg21); udbg_puts("\n");
+ udbg_puts(" "); udbg_puthex(srr0); udbg_puts("\n");
+ udbg_puts(" "); udbg_puthex(srr1); udbg_puts("\n");
+}
+
+/*
+ * "spread_lpevents=N" boot argument: share lp event processing across
+ * the first N processors by pointing their pacas at processor 0's lp
+ * event queue. Values outside 1..maxPacas are rejected with a message.
+ * Fix: the rejection printk said "spreaqd_lpevents".
+ */
+int set_spread_lpevents( char * str )
+{
+ /* The parameter is the number of processors to share in processing lp events */
+ unsigned long i;
+ unsigned long val = simple_strtoul( str, NULL, 0 );
+ if ( ( val > 0 ) && ( val <= maxPacas ) ) {
+ for ( i=1; i<val; ++i )
+ xPaca[i].lpQueuePtr = xPaca[0].lpQueuePtr;
+ printk("lpevent processing spread over %ld processors\n", val);
+ }
+ else
+ printk("invalid spread_lpevents %ld\n", val);
+ return 1;
+}
+
+/* This should only be called on processor 0 during calibrate decr */
+void setup_default_decr(void)
+{
+ /* SPRG3 holds this processor's Paca pointer. */
+ struct Paca * paca = (struct Paca *)mfspr(SPRG3);
+
+ /* A plain decr_overclock= also applies to processor 0 unless it
+ * was overridden explicitly with decr_overclock_proc0=. */
+ if ( decr_overclock_set && !decr_overclock_proc0_set )
+ decr_overclock_proc0 = decr_overclock;
+
+ paca->default_decr = tb_ticks_per_jiffy / decr_overclock_proc0;
+ paca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
+}
+
+/* "decr_overclock_proc0=N" boot argument: decrementer overclock factor
+ * for processor 0 only. Accepted range is 1..48. */
+int set_decr_overclock_proc0( char * str )
+{
+ unsigned long val = simple_strtoul( str, NULL, 0 );
+
+ if ( val < 1 || val > 48 ) {
+ printk("invalid proc 0 decrementer overclock factor of %ld\n", val);
+ return 1;
+ }
+
+ decr_overclock_proc0_set = 1;
+ decr_overclock_proc0 = val;
+ printk("proc 0 decrementer overclock factor of %ld\n", val);
+ return 1;
+}
+
+/* "decr_overclock=N" boot argument: decrementer overclock factor for
+ * all processors. Accepted range is 1..48. */
+int set_decr_overclock( char * str )
+{
+ unsigned long val = simple_strtoul( str, NULL, 0 );
+
+ if ( val < 1 || val > 48 ) {
+ printk("invalid decrementer overclock factor of %ld\n", val);
+ return 1;
+ }
+
+ decr_overclock_set = 1;
+ decr_overclock = val;
+ printk("decrementer overclock factor of %ld\n", val);
+ return 1;
+}
+
+/* Register the early boot argument handlers defined above. */
+__setup("spread_lpevents=", set_spread_lpevents );
+__setup("decr_overclock_proc0=", set_decr_overclock_proc0 );
+__setup("decr_overclock=", set_decr_overclock );
--- /dev/null
+/*
+ * linux/arch/ppc64/kernel/signal.c
+ *
+ *
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/i386/kernel/signal.c"
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/elf.h>
+#include <asm/ppc32.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/ppcdebug.h>
+#include <asm/unistd.h>
+
+/* Set to 1 for verbose printk diagnostics from the badframe paths. */
+#define DEBUG_SIG 0
+
+/* The signals user space may block; SIGKILL and SIGSTOP never are. */
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Bytes of GP register state copied to/from the signal frame. */
+#define GP_REGS_SIZE MIN(sizeof(elf_gregset_t), sizeof(struct pt_regs))
+
+/*
+ * These are the flags in the MSR that the user is allowed to change
+ * by modifying the saved value of the MSR on the stack. SE and BE
+ * should not be in this list since gdb may want to change these. I.e,
+ * you should be able to step out of a signal handler to see what
+ * instruction executes next after the signal handler completes.
+ * Alternately, if you stepped into a signal handler, you should be
+ * able to continue 'til the next breakpoint from within the signal
+ * handler, even if the handler returns.
+ */
+#define MSR_USERCHANGE (MSR_FE0 | MSR_FE1)
+
+int do_signal(sigset_t *oldset, struct pt_regs *regs);
+extern long sys_wait4(pid_t pid, unsigned int *stat_addr,
+ int options, /*unsigned long*/ struct rusage *ru);
+
+/* Copy a siginfo_t to user space. Kernel-generated siginfo (si_code >=
+ * 0) is copied field by field so structure padding never leaks; a
+ * negative si_code (from user space) is copied wholesale. */
+int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
+{
+ if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
+ return -EFAULT;
+ if (from->si_code < 0)
+ return __copy_to_user(to, from, sizeof(siginfo_t));
+ else {
+ int err;
+
+ /* If you change siginfo_t structure, please be sure
+ this code is fixed accordingly.
+ It should never copy any pad contained in the structure
+ to avoid security leaks, but must copy the generic
+ 3 ints plus the relevant union member. */
+ err = __put_user(from->si_signo, &to->si_signo);
+ err |= __put_user(from->si_errno, &to->si_errno);
+ err |= __put_user((short)from->si_code, &to->si_code);
+ /* First 32bits of unions are always present. */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ switch (from->si_code >> 16) {
+ case __SI_FAULT >> 16:
+ break;
+ case __SI_CHLD >> 16:
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ err |= __put_user(from->si_status, &to->si_status);
+ /* falls into default: si_uid is also copied for __SI_CHLD
+ * - presumably deliberate; confirm against siginfo layout */
+ default:
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ /* case __SI_RT: This is not generated by the kernel as of now. */
+ }
+ return err;
+ }
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ * Fix: "&current" had been corrupted to the mojibake "¤t"
+ * (HTML entity &curren;) in three places below.
+ */
+long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
+ struct pt_regs *regs)
+{
+ sigset_t saveset;
+
+ PPCDBG(PPCDBG_SYS64X, "sys_sigsuspend - running - pid=%ld current=%lx comm=%s \n",
+ current->pid, current, current->comm);
+
+ /* Install the (blockable subset of the) requested mask, saving the
+ * old one so do_signal() can restore it around the handler. */
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs->gpr[3] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ /*
+ * If a signal handler needs to be called,
+ * do_signal() has set R3 to the signal number (the
+ * first argument of the signal handler), so don't
+ * overwrite that with EINTR !
+ * In the other cases, do_signal() doesn't touch
+ * R3, so it's still set to -EINTR (see above).
+ */
+ return regs->gpr[3];
+ }
+}
+
+/*
+ * Atomically replace the blocked mask with *unewset and wait for a
+ * signal (rt flavour: a full sigset_t is read from user space).
+ * Fix: "&current" had been corrupted to the mojibake "¤t".
+ */
+long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int p6,
+ int p7, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ PPCDBG(PPCDBG_SYS64X, "sys_rt_sigsuspend - running - pid=%ld current=%lx comm=%s \n",
+ current->pid, current, current->comm);
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+
+ /* Stays -EINTR unless do_signal() sets up a handler; see the
+ * comment in sys_sigsuspend(). */
+ regs->gpr[3] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return regs->gpr[3];
+ }
+}
+
+
+
+/* Set or query the alternate signal stack; do_sigaltstack() needs the
+ * user stack pointer (gpr[1]) for its on-stack check. */
+asmlinkage long sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+{
+ /* NOTE(review): recovers the pt_regs pointer from the address of
+ * the first argument - relies on the syscall entry stack layout;
+ * confirm before reusing this trick elsewhere. */
+ struct pt_regs *regs = (struct pt_regs *) &uss;
+
+ PPCDBG(PPCDBG_SYS64X, "sys_sigaltstack - running - pid=%ld current=%lx comm=%s \n",
+ current->pid, current, current->comm);
+
+ return do_sigaltstack(uss, uoss, regs->gpr[1]);
+}
+
+/* Old-style sigaction: convert the user's old_sigaction to and from
+ * the kernel k_sigaction and delegate to do_sigaction(). */
+long sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ PPCDBG(PPCDBG_SYS64X, "sys_sigaction - running - pid=%ld current=%lx comm=%s \n",
+ current->pid, current, current->comm);
+
+
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ /* The old ABI carries only the first word of the mask. */
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+
+
+
+ return ret;
+}
+
+/*
+ * When we have signals to deliver, we set up on the
+ * user stack, going down from the original stack pointer:
+ * a sigregs struct
+ * one or more sigcontext structs
+ * a gap of __SIGNAL_FRAMESIZE bytes
+ *
+ * Each of these things must be a multiple of 16 bytes in size.
+ *
+ * XXX ultimately we will have to stack up a siginfo and ucontext
+ * for each rt signal.
+ */
+struct sigregs {
+ elf_gregset_t gp_regs;
+ double fp_regs[ELF_NFPREG];
+ /* tramp holds the two sigreturn trampoline instructions
+ * (li r0,__NR_*sigreturn; sc) written by setup_frame(). */
+ unsigned int tramp[2];
+ /* 64 bit API allows for 288 bytes below sp before
+ decrementing it. */
+ int abigap[72];
+};
+
+
+
+/* Layout of the rt signal frame on the user stack: pointers to the
+ * siginfo/ucontext followed by the structures themselves. */
+struct rt_sigframe
+{
+ unsigned long _unused[2];
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+
+/*
+ * When we have rt signals to deliver, we set up on the
+ * user stack, going down from the original stack pointer:
+ * a sigregs struct
+ * one rt_sigframe struct (siginfo + ucontext)
+ * a gap of __SIGNAL_FRAMESIZE bytes
+ *
+ * Each of these things must be a multiple of 16 bytes in size.
+ *
+ */
+
+/*
+ * Return from an rt signal handler: restore the caller's signal mask
+ * and, for the last stacked signal, the saved register/FP state and
+ * alternate-stack flags; otherwise set up the next stacked handler.
+ * Fix: "&current" had been corrupted to the mojibake "¤t".
+ */
+int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7, unsigned long r8,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe *rt_sf;
+ struct sigcontext_struct sigctx;
+ struct sigregs *sr;
+ int ret;
+ elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */
+ sigset_t set;
+ stack_t st;
+ unsigned long prevsp;
+
+ rt_sf = (struct rt_sigframe *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
+ || copy_from_user(&set, &rt_sf->uc.uc_sigmask, sizeof(set))
+ || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+
+ rt_sf++; /* Look at next rt_sigframe */
+ if (rt_sf == (struct rt_sigframe *)(sigctx.regs)) {
+ /* Last stacked signal - restore registers -
+ * sigctx is initialized to point to the
+ * preamble frame (where registers are stored)
+ * see handle_signal()
+ */
+ sr = (struct sigregs *) sigctx.regs;
+ if (regs->msr & MSR_FP )
+ giveup_fpu(current);
+ if (copy_from_user(saved_regs, &sr->gp_regs,
+ sizeof(sr->gp_regs)))
+ goto badframe;
+ /* Only the MSR_USERCHANGE bits may come from user space. */
+ saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
+ | (saved_regs[PT_MSR] & MSR_USERCHANGE);
+ saved_regs[PT_SOFTE] = regs->softe;
+ memcpy(regs, saved_regs, GP_REGS_SIZE);
+ if (copy_from_user(current->thread.fpr, &sr->fp_regs,
+ sizeof(sr->fp_regs)))
+ goto badframe;
+ /* This function sets back the stack flags into
+ the current task structure. */
+ sys_sigaltstack(&st, NULL);
+
+ ret = regs->result;
+ } else {
+ /* More signals to go */
+ /* Set up registers for next signal handler */
+ regs->gpr[1] = (unsigned long)rt_sf - __SIGNAL_FRAMESIZE;
+ if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx)))
+ goto badframe;
+ sr = (struct sigregs *) sigctx.regs;
+ regs->gpr[3] = ret = sigctx.signal;
+ /* Get the siginfo */
+ get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo);
+ /* Get the ucontext */
+ get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc);
+ regs->gpr[6] = (unsigned long) rt_sf;
+
+ regs->link = (unsigned long) &sr->tramp;
+ regs->nip = sigctx.handler;
+ if (get_user(prevsp, &sr->gp_regs[PT_R1])
+ || put_user(prevsp, (unsigned long *) regs->gpr[1]))
+ goto badframe;
+ }
+ return ret;
+
+badframe:
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Build the "preamble" sigregs frame (saved GP/FP state plus the
+ * rt_sigreturn trampoline) and point the registers at the rt signal
+ * handler whose function descriptor is in uc_mcontext.handler.
+ */
+static void
+setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
+ signed long newsp)
+{
+ struct rt_sigframe *rt_sf = (struct rt_sigframe *) newsp;
+ /* Handler is *really* a pointer to the function descriptor for
+ * the signal routine. The first entry in the function
+ * descriptor is the entry address of signal and the second
+ * entry is the TOC value we need to use.
+ */
+ struct funct_descr_entry {
+ unsigned long entry;
+ unsigned long toc;
+ };
+
+ struct funct_descr_entry * funct_desc_ptr;
+ unsigned long temp_ptr;
+
+ /* Set up preamble frame */
+ if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto badframe;
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+ if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE)
+ || __copy_to_user(&frame->fp_regs, current->thread.fpr,
+ ELF_NFPREG * sizeof(double))
+ || __put_user(0x38000000UL + __NR_rt_sigreturn, &frame->tramp[0]) /* li r0, __NR_rt_sigreturn */
+ || __put_user(0x44000002UL, &frame->tramp[1])) /* sc */
+ goto badframe;
+ /* The trampoline will be executed, so flush it to memory. */
+ flush_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[2]);
+
+ /* Retrieve rt_sigframe from stack and
+ set up registers for signal handler
+ */
+ newsp -= __SIGNAL_FRAMESIZE;
+
+ if ( get_user(temp_ptr, &rt_sf->uc.uc_mcontext.handler)) {
+ goto badframe;
+ }
+
+ funct_desc_ptr = ( struct funct_descr_entry *) temp_ptr;
+
+ /* Back-chain the stack and load nip/toc and the three handler
+ * arguments (sig, siginfo *, ucontext *). */
+ if (put_user(regs->gpr[1], (unsigned long *)newsp)
+ || get_user(regs->nip, &funct_desc_ptr->entry)
+ || get_user(regs->gpr[2], &funct_desc_ptr->toc)
+ || get_user(regs->gpr[3], &rt_sf->uc.uc_mcontext.signal)
+ || get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo)
+ || get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc))
+ goto badframe;
+
+ regs->gpr[1] = newsp;
+ regs->gpr[6] = (unsigned long) rt_sf;
+ regs->link = (unsigned long) frame->tramp;
+
+
+ return;
+
+badframe:
+#if DEBUG_SIG
+ printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
+ regs, frame, newsp);
+#endif
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Do a signal return; undo the signal stack.
+ * Fix: "&current" had been corrupted to the mojibake "¤t".
+ */
+long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7, unsigned long r8,
+ struct pt_regs *regs)
+{
+ struct sigcontext_struct *sc, sigctx;
+ struct sigregs *sr;
+ long ret;
+ elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */
+ sigset_t set;
+ unsigned long prevsp;
+
+ sc = (struct sigcontext_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
+ goto badframe;
+
+ /* Restore the signal mask in force before the handler ran. */
+ set.sig[0] = sigctx.oldmask;
+#if _NSIG_WORDS > 1
+ set.sig[1] = sigctx._unused[3];
+#endif
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+
+ sc++; /* Look at next sigcontext */
+ if (sc == (struct sigcontext_struct *)(sigctx.regs)) {
+ /* Last stacked signal - restore registers */
+ sr = (struct sigregs *) sigctx.regs;
+ if (regs->msr & MSR_FP )
+ giveup_fpu(current);
+ if (copy_from_user(saved_regs, &sr->gp_regs,
+ sizeof(sr->gp_regs)))
+ goto badframe;
+ /* Only the MSR_USERCHANGE bits may come from user space. */
+ saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
+ | (saved_regs[PT_MSR] & MSR_USERCHANGE);
+ saved_regs[PT_SOFTE] = regs->softe;
+ memcpy(regs, saved_regs, GP_REGS_SIZE);
+
+ if (copy_from_user(current->thread.fpr, &sr->fp_regs,
+ sizeof(sr->fp_regs)))
+ goto badframe;
+
+ ret = regs->result;
+
+ } else {
+ /* More signals to go */
+ regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE;
+ if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
+ goto badframe;
+ sr = (struct sigregs *) sigctx.regs;
+ regs->gpr[3] = ret = sigctx.signal;
+ regs->gpr[4] = (unsigned long) sc;
+ regs->link = (unsigned long) &sr->tramp;
+ regs->nip = sigctx.handler;
+
+ if (get_user(prevsp, &sr->gp_regs[PT_R1])
+ || put_user(prevsp, (unsigned long *) regs->gpr[1]))
+ goto badframe;
+ }
+ return ret;
+
+badframe:
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Set up a signal frame.
+ * Non-rt variant of setup_rt_frame(): store GP/FP state and the
+ * sigreturn trampoline, then aim the registers at the handler whose
+ * function descriptor is in sc->handler.
+ */
+static void
+setup_frame(struct pt_regs *regs, struct sigregs *frame,
+ unsigned long newsp)
+{
+
+ /* Handler is *really* a pointer to the function descriptor for
+ * the signal routine. The first entry in the function
+ * descriptor is the entry address of signal and the second
+ * entry is the TOC value we need to use.
+ */
+ struct funct_descr_entry {
+ unsigned long entry;
+ unsigned long toc;
+ };
+
+ struct funct_descr_entry * funct_desc_ptr;
+ unsigned long temp_ptr;
+
+ struct sigcontext_struct *sc = (struct sigcontext_struct *) newsp;
+
+ if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto badframe;
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+ if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE)
+ || __copy_to_user(&frame->fp_regs, current->thread.fpr,
+ ELF_NFPREG * sizeof(double))
+ || __put_user(0x38000000UL + __NR_sigreturn, &frame->tramp[0]) /* li r0, __NR_sigreturn */
+ || __put_user(0x44000002UL, &frame->tramp[1])) /* sc */
+ goto badframe;
+ /* The trampoline will be executed, so flush it to memory. */
+ flush_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[2]);
+
+ newsp -= __SIGNAL_FRAMESIZE;
+ if ( get_user(temp_ptr, &sc->handler))
+ goto badframe;
+
+ funct_desc_ptr = ( struct funct_descr_entry *) temp_ptr;
+
+ /* Back-chain the stack and load nip/toc plus the handler args. */
+ if (put_user(regs->gpr[1], (unsigned long *)newsp)
+ || get_user(regs->nip, & funct_desc_ptr ->entry)
+ || get_user(regs->gpr[2],& funct_desc_ptr->toc)
+ || get_user(regs->gpr[3], &sc->signal))
+ goto badframe;
+ regs->gpr[1] = newsp;
+ regs->gpr[4] = (unsigned long) sc;
+ regs->link = (unsigned long) frame->tramp;
+
+
+ PPCDBG(PPCDBG_SIGNAL, "setup_frame - returning - regs->gpr[1]=%lx, regs->gpr[4]=%lx, regs->link=%lx \n",
+ regs->gpr[1], regs->gpr[4], regs->link);
+
+ return;
+
+ badframe:
+ PPCDBG(PPCDBG_SIGNAL, "setup_frame - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER();
+#if DEBUG_SIG
+ printk("badframe in setup_frame, regs=%p frame=%p newsp=%lx\n",
+ regs, frame, newsp);
+#endif
+ do_exit(SIGSEGV);
+}
+
+/*
+ * OK, we're invoking a handler
+ * Push a sigcontext (or an rt siginfo+ucontext for SA_SIGINFO) below
+ * *newspp and update the blocked mask per the sigaction flags.
+ * Fix: "&current" had been corrupted to the mojibake "¤t".
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
+ unsigned long *newspp, unsigned long frame)
+{
+ struct sigcontext_struct *sc;
+ struct rt_sigframe *rt_sf;
+
+ /* An interrupted restartable syscall becomes -EINTR when the
+ * handler does not request SA_RESTART (or can never restart). */
+ if (regs->trap == 0x0C00 /* System Call! */
+ && ((int)regs->result == -ERESTARTNOHAND ||
+ ((int)regs->result == -ERESTARTSYS &&
+ !(ka->sa.sa_flags & SA_RESTART))))
+ regs->result = -EINTR;
+ /* Set up Signal Frame */
+
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ /* Put a Real Time Context onto stack */
+ *newspp -= sizeof(*rt_sf);
+ rt_sf = (struct rt_sigframe *) *newspp;
+ if (verify_area(VERIFY_WRITE, rt_sf, sizeof(*rt_sf)))
+ goto badframe;
+
+ if (__put_user((unsigned long) ka->sa.sa_handler, &rt_sf->uc.uc_mcontext.handler)
+ || __put_user(&rt_sf->info, &rt_sf->pinfo)
+ || __put_user(&rt_sf->uc, &rt_sf->puc)
+ /* Put the siginfo */
+ || __copy_to_user(&rt_sf->info, info, sizeof(*info))
+ /* Create the ucontext */
+ || __put_user(0, &rt_sf->uc.uc_flags)
+ || __put_user(0, &rt_sf->uc.uc_link)
+ || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
+ || __put_user(sas_ss_flags(regs->gpr[1]),
+ &rt_sf->uc.uc_stack.ss_flags)
+ || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
+ || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset))
+ /* mcontext.regs points to preamble register frame */
+ || __put_user((struct pt_regs *)frame, &rt_sf->uc.uc_mcontext.regs)
+ || __put_user(sig, &rt_sf->uc.uc_mcontext.signal))
+ goto badframe;
+
+ } else {
+ /* Put another sigcontext on the stack */
+ *newspp -= sizeof(*sc);
+ sc = (struct sigcontext_struct *) *newspp;
+ if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
+ goto badframe;
+
+ if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
+ || __put_user(oldset->sig[0], &sc->oldmask)
+#if _NSIG_WORDS > 1
+ || __put_user(oldset->sig[1], &sc->_unused[3])
+#endif
+ || __put_user((struct pt_regs *)frame, &sc->regs)
+ || __put_user(sig, &sc->signal))
+ goto badframe;
+ }
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ /* Block sa_mask plus the signal itself while the handler runs. */
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+ return;
+
+badframe:
+#if DEBUG_SIG
+ printk("badframe in handle_signal, regs=%p frame=%lx newsp=%lx\n",
+ regs, frame, *newspp);
+ printk("sc=%p sig=%d ka=%p info=%p oldset=%p\n", sc, sig, ka, info, oldset);
+#endif
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
+/*
+ * Deliver one pending, non-ignored signal: dequeue it (honouring ptrace
+ * interception and default dispositions), build the user-stack frame and
+ * redirect the registers at the handler. Returns 1 if a handler was set
+ * up, 0 otherwise.
+ * Fix: "&current" had been corrupted to the mojibake "¤t".
+ */
+int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ siginfo_t info;
+ struct k_sigaction *ka;
+ unsigned long frame, newsp;
+
+ /*
+ * If the current thread is 32 bit - invoke the
+ * 32 bit signal handling code
+ */
+ if (test_thread_flag(TIF_32BIT))
+ return do_signal32(oldset, regs);
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ newsp = frame = 0;
+
+ for (;;) {
+ unsigned long signr;
+
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - (pre) dequeueing signal - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+ spin_lock_irq(&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq(&current->sigmask_lock);
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - (aft) dequeueing signal - signal=%lx - pid=%ld current=%lx comm=%s \n", signr, current->pid, current, current->comm);
+
+ if (!signr)
+ break;
+
+ if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - ka=%p, action handler=%lx \n", ka, ka->sa.sa_handler);
+
+ if (ka->sa.sa_handler == SIG_IGN) {
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - into SIG_IGN logic \n");
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - into SIG_DFL logic \n");
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
+ if (do_coredump(signr, regs))
+ exit_code |= 0x80;
+ /* FALLTHRU */
+
+ default:
+ sig_exit(signr, exit_code, &info);
+ /* NOTREACHED */
+ }
+ }
+
+ /* Use the alternate stack if requested and not already on it. */
+ if ( (ka->sa.sa_flags & SA_ONSTACK)
+ && (! on_sig_stack(regs->gpr[1])))
+ newsp = (current->sas_ss_sp + current->sas_ss_size);
+ else
+ newsp = regs->gpr[1];
+ newsp = frame = newsp - sizeof(struct sigregs);
+
+ /* Whee! Actually deliver the signal. */
+
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - GOING TO RUN SIGNAL HANDLER - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+ handle_signal(signr, ka, &info, oldset, regs, &newsp, frame);
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - after running signal handler - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+ break;
+ }
+
+ if (regs->trap == 0x0C00 /* System Call! */ &&
+ ((int)regs->result == -ERESTARTNOHAND ||
+ (int)regs->result == -ERESTARTSYS ||
+ (int)regs->result == -ERESTARTNOINTR)) {
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - going to back up & retry system call \n");
+ regs->gpr[3] = regs->orig_gpr3;
+ regs->nip -= 4; /* Back up & retry system call */
+ regs->result = 0;
+ }
+
+ if (newsp == frame)
+ {
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - returning w/ no signal delivered \n");
+ return 0; /* no signals delivered */
+ }
+
+ /* handle_signal() moved newsp below the context it pushed. */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(regs, (struct sigregs *) frame, newsp);
+ else
+ setup_frame(regs, (struct sigregs *) frame, newsp);
+ PPCDBG(PPCDBG_SIGNAL, "do_signal - returning a signal was delivered \n");
+ return 1;
+}
--- /dev/null
+/*
+ * signal32.c: Support 32bit signal syscalls.
+ *
+ * Copyright (C) 2001 IBM
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ptrace.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/utime.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/smb_fs.h>
+#include <linux/smb_mount.h>
+#include <linux/ncp_fs.h>
+#include <linux/quota.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/filter.h>
+#include <asm/types.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+#include <linux/elf.h>
+#include <asm/ppc32.h>
+#include <asm/ppcdebug.h>
+#include <asm/unistd.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+/*
+ * These are the flags in the MSR that the user is allowed to change
+ * by modifying the saved value of the MSR on the stack. SE and BE
+ * should not be in this list since gdb may want to change these. I.e,
+ * you should be able to step out of a signal handler to see what
+ * instruction executes next after the signal handler completes.
+ * Alternately, if you stepped into a signal handler, you should be
+ * able to continue 'til the next breakpoint from within the signal
+ * handler, even if the handler returns.
+ */
+#define MSR_USERCHANGE (MSR_FE0 | MSR_FE1)
+
+/* 32-bit layout of struct timespec as seen by 32-bit user space
+ * (both fields shrink from 64 to 32 bits). */
+struct timespec32 {
+	s32    tv_sec;		/* seconds */
+	s32    tv_nsec;		/* nanoseconds */
+};
+
+/* Register-save area pushed on the 32-bit user stack for signal delivery. */
+struct sigregs32 {
+	/***********************************************************************/
+	/* the gp_regs array is 32 bit representation of the pt_regs structure */
+	/* that was stored on the kernel stack during the system call that     */
+	/* was interrupted for the signal.                                     */
+	/*                                                                     */
+	/* Note that the entire pt_regs regs structure will fit in the gp_regs */
+	/* structure because the ELF_NREG value is 48 for PPC and the pt_regs  */
+	/* structure contains 44 registers                                     */
+	/*                                                                     */
+	/***********************************************************************/
+	elf_gregset_t32	gp_regs;
+	double		fp_regs[ELF_NFPREG];	/* FP state saved across the handler */
+	unsigned int	tramp[2];		/* trampoline: li r0,__NR_sigreturn; sc */
+	/* Programs using the rs6000/xcoff abi can save up to 19 gp regs
+	   and 18 fp regs below sp before decrementing it.  */
+	int		abigap[56];
+};
+
+
+/* Layout of one RT signal frame on the 32-bit user stack. */
+struct rt_sigframe_32 {
+	/* Unused space at start of frame to allow for storing of stack pointers */
+	unsigned long _unused;
+	/* This is a 32 bit pointer in user address space
+	 * it is a pointer to the siginfo structure in the rt stack frame
+	 */
+	u32 pinfo;
+	/* This is a 32 bit pointer in user address space */
+	/* it is a pointer to the user context in the rt stack frame */
+	u32 puc;
+	struct siginfo32  info;	/* 32-bit siginfo passed to the handler */
+	struct ucontext32 uc;	/* 32-bit ucontext (mask, stack, mcontext) */
+};
+
+
+
+
+
+extern asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
+
+
+/****************************************************************************/
+/* Start of nonRT signal support */
+/* */
+/* sigset_t is 32 bits for non-rt signals */
+/* */
+/* System Calls */
+/* sigaction sys32_sigaction */
+/* sigpending sys32_sigpending */
+/* sigprocmask sys32_sigprocmask */
+/* sigreturn sys32_sigreturn */
+/* */
+/* Note sigsuspend has no special 32 bit routine - uses the 64 bit routine */
+/* */
+/* Other routines */
+/* setup_frame32 */
+/* */
+/****************************************************************************/
+
+
+/*
+ * 32-bit compat entry for sigaction(2) (old, non-RT flavour).
+ * Converts a user-space struct old_sigaction32 to the kernel's 64-bit
+ * struct k_sigaction, calls do_sigaction(), and converts the previous
+ * action back for the caller.  Returns 0 or a -errno / -EFAULT value.
+ */
+asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 *act, struct old_sigaction32 *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sigaction - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	/* Historical PPC ABI quirk: a negative signal number means the same
+	 * signal with SA_SHIRQ-era semantics; only the magnitude is used. */
+	if (sig < 0)
+	{
+		sig = -sig;
+	}
+
+	if (act)
+	{
+		old_sigset_t32 mask;
+
+		/* NOTE(review): the (long) casts make the first argument of
+		 * get_user an lvalue cast — this relies on the old GCC
+		 * cast-as-lvalue extension; do not "clean up" without
+		 * confirming the toolchain. */
+		ret = get_user((long)new_ka.sa.sa_handler, &act->sa_handler);
+		ret |= __get_user((long)new_ka.sa.sa_restorer, &act->sa_restorer);
+		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= __get_user(mask, &act->sa_mask);
+		if (ret)
+			return ret;
+		PPCDBG(PPCDBG_SIGNAL, "sys32_sigaction flags =%lx \n", new_ka.sa.sa_flags);
+
+		/* Widen the 32-bit mask into the 64-bit kernel sigset. */
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact)
+	{
+		/* Copy the previous action back out; only the low word of the
+		 * mask fits in old_sigaction32. */
+		ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
+		ret |= __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer);
+		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sigaction - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	return ret;
+}
+
+
+
+
+extern asmlinkage long sys_sigpending(old_sigset_t *set);
+
+/*
+ * 32-bit compat entry for sigpending(2).
+ * Runs the 64-bit sys_sigpending() against a kernel-resident mask
+ * (under KERNEL_DS) and then narrows the result into the caller's
+ * 32-bit old_sigset_t32.  Returns sys_sigpending()'s result, or
+ * -EFAULT if the copy-out faults.
+ */
+asmlinkage long sys32_sigpending(old_sigset_t32 *set)
+{
+	mm_segment_t saved_fs = get_fs();
+	old_sigset_t pending;
+	int rc;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sigpending - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	/* Point the uaccess checks at kernel space while the 64-bit
+	 * syscall fills in our local buffer. */
+	set_fs(KERNEL_DS);
+	rc = sys_sigpending(&pending);
+	set_fs(saved_fs);
+
+	if (put_user(pending, set))
+		return -EFAULT;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sigpending - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	return rc;
+}
+
+
+
+
+extern asmlinkage long sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset);
+
+/* Note: it is necessary to treat how as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/*
+ * 32-bit compat entry for sigprocmask(2) (old, non-RT flavour).
+ * Copies the caller's 32-bit mask in, forwards to the 64-bit
+ * sys_sigprocmask() under KERNEL_DS, and copies the old mask back out.
+ * `how` arrives as u32 and is cast to int so sign extension between the
+ * 32-bit and 64-bit register images is performed correctly.
+ */
+asmlinkage long sys32_sigprocmask(u32 how, old_sigset_t32 *set, old_sigset_t32 *oset)
+{
+	mm_segment_t saved_fs = get_fs();
+	old_sigset_t mask;
+	int rc;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sigprocmask - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	if (set) {
+		if (get_user(mask, set))
+			return -EFAULT;
+	}
+
+	/* The same local buffer serves as input and output for the
+	 * 64-bit syscall, exactly as the caller's pointers dictate. */
+	set_fs(KERNEL_DS);
+	rc = sys_sigprocmask((int)how, set ? &mask : NULL, oset ? &mask : NULL);
+	set_fs(saved_fs);
+
+	if (rc)
+		return rc;
+
+	if (oset) {
+		if (put_user(mask, oset))
+			return -EFAULT;
+	}
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sigprocmask - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	return 0;
+}
+
+
+
+/*
+ * When we have signals to deliver, we set up on the
+ * user stack, going down from the original stack pointer:
+ * a sigregs struct
+ * one or more sigcontext structs
+ * a gap of __SIGNAL_FRAMESIZE32 bytes
+ *
+ * Each of these things must be a multiple of 16 bytes in size.
+ *
+*/
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Entered via the user-stack trampoline when a 32-bit (non-RT) signal
+ * handler returns.  Pops one sigcontext off the user stack, restores the
+ * blocked-signal mask, then either restores the interrupted register
+ * image (last stacked signal) or arranges for the next stacked handler
+ * to run.
+ *
+ * FIX: the corrupted "¤t" tokens (HTML-entity mangling of "&current")
+ * around the sigmask_lock are restored to "&current".
+ */
+long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+		     unsigned long r6, unsigned long r7, unsigned long r8,
+		     struct pt_regs *regs)
+{
+	struct sigcontext32_struct *sc, sigctx;
+	struct sigregs32 *sr;
+	int ret;
+	elf_gregset_t32 saved_regs;  /* an array of ELF_NGREG unsigned ints (32 bits) */
+	sigset_t set;
+	unsigned int prevsp;
+
+	PPCDBG(PPCDBG_SIGNAL, "sys32_sigreturn - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	/* The bottom-most sigcontext sits just above the ABI gap. */
+	sc = (struct sigcontext32_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
+	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
+		goto badframe;
+
+	/* Note that PPC32 puts the upper 32 bits of the sigmask in the */
+	/* unused part of the signal stackframe                         */
+	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sigmask_lock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sigmask_lock);
+
+	sc++;			/* Look at next sigcontext */
+	/* If the next sigcontext is actually the sigregs (frame) */
+	/* - then no more sigcontexts on the user stack          */
+	if (sc == (struct sigcontext32_struct *)(u64)sigctx.regs)
+	{
+		/* Last stacked signal - restore registers */
+		sr = (struct sigregs32 *)(u64)sigctx.regs;
+		if (regs->msr & MSR_FP)
+			giveup_fpu(current);
+		/* copy the 32 bit register values off the user stack */
+		/* into the 32 bit register area                     */
+		if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
+			goto badframe;
+		/*
+		 * The saved reg structure in the frame is an elf_gregset_t32:
+		 * a 32-bit image of the pt_regs that was stored on the kernel
+		 * stack when the interrupted system call was suspended for
+		 * the signal.  Only 32 bits per register were saved because
+		 * the handler runs in 32-bit mode.  Entries in the
+		 * elf_gregset use the same PT_* indices as pt_regs.
+		 */
+
+		/* Only the user-changeable MSR bits may come from the frame. */
+		saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
+			| (saved_regs[PT_MSR] & MSR_USERCHANGE);
+		regs->gpr[0] = (u64)(saved_regs[0]) & 0xFFFFFFFF;
+		regs->gpr[1] = (u64)(saved_regs[1]) & 0xFFFFFFFF;
+		/* Register 2 is the kernel toc - should be reset on any */
+		/* calls into the kernel                                 */
+		regs->gpr[2] = (u64)(saved_regs[2]) & 0xFFFFFFFF;
+
+		regs->gpr[3] = (u64)(saved_regs[3]) & 0xFFFFFFFF;
+		regs->gpr[4] = (u64)(saved_regs[4]) & 0xFFFFFFFF;
+		regs->gpr[5] = (u64)(saved_regs[5]) & 0xFFFFFFFF;
+		regs->gpr[6] = (u64)(saved_regs[6]) & 0xFFFFFFFF;
+		regs->gpr[7] = (u64)(saved_regs[7]) & 0xFFFFFFFF;
+		regs->gpr[8] = (u64)(saved_regs[8]) & 0xFFFFFFFF;
+		regs->gpr[9] = (u64)(saved_regs[9]) & 0xFFFFFFFF;
+		regs->gpr[10] = (u64)(saved_regs[10]) & 0xFFFFFFFF;
+		regs->gpr[11] = (u64)(saved_regs[11]) & 0xFFFFFFFF;
+		regs->gpr[12] = (u64)(saved_regs[12]) & 0xFFFFFFFF;
+		regs->gpr[13] = (u64)(saved_regs[13]) & 0xFFFFFFFF;
+		regs->gpr[14] = (u64)(saved_regs[14]) & 0xFFFFFFFF;
+		regs->gpr[15] = (u64)(saved_regs[15]) & 0xFFFFFFFF;
+		regs->gpr[16] = (u64)(saved_regs[16]) & 0xFFFFFFFF;
+		regs->gpr[17] = (u64)(saved_regs[17]) & 0xFFFFFFFF;
+		regs->gpr[18] = (u64)(saved_regs[18]) & 0xFFFFFFFF;
+		regs->gpr[19] = (u64)(saved_regs[19]) & 0xFFFFFFFF;
+		regs->gpr[20] = (u64)(saved_regs[20]) & 0xFFFFFFFF;
+		regs->gpr[21] = (u64)(saved_regs[21]) & 0xFFFFFFFF;
+		regs->gpr[22] = (u64)(saved_regs[22]) & 0xFFFFFFFF;
+		regs->gpr[23] = (u64)(saved_regs[23]) & 0xFFFFFFFF;
+		regs->gpr[24] = (u64)(saved_regs[24]) & 0xFFFFFFFF;
+		regs->gpr[25] = (u64)(saved_regs[25]) & 0xFFFFFFFF;
+		regs->gpr[26] = (u64)(saved_regs[26]) & 0xFFFFFFFF;
+		regs->gpr[27] = (u64)(saved_regs[27]) & 0xFFFFFFFF;
+		regs->gpr[28] = (u64)(saved_regs[28]) & 0xFFFFFFFF;
+		regs->gpr[29] = (u64)(saved_regs[29]) & 0xFFFFFFFF;
+		regs->gpr[30] = (u64)(saved_regs[30]) & 0xFFFFFFFF;
+		regs->gpr[31] = (u64)(saved_regs[31]) & 0xFFFFFFFF;
+		/* restore the non gpr registers */
+		regs->msr = (u64)(saved_regs[PT_MSR]) & 0xFFFFFFFF;
+		/* Insure that the interrupt mode is 64 bit, during 32 bit execution.
+		 * (This is necessary because we only saved lower 32 bits of msr.)
+		 */
+		regs->msr = regs->msr | MSR_ISF; /* When this thread is interrupted it should run in 64 bit mode. */
+
+		regs->nip = (u64)(saved_regs[PT_NIP]) & 0xFFFFFFFF;
+		regs->orig_gpr3 = (u64)(saved_regs[PT_ORIG_R3]) & 0xFFFFFFFF;
+		regs->ctr = (u64)(saved_regs[PT_CTR]) & 0xFFFFFFFF;
+		regs->link = (u64)(saved_regs[PT_LNK]) & 0xFFFFFFFF;
+		regs->xer = (u64)(saved_regs[PT_XER]) & 0xFFFFFFFF;
+		regs->ccr = (u64)(saved_regs[PT_CCR]) & 0xFFFFFFFF;
+		/* regs->softe is left unchanged (like the MSR.EE bit) */
+		/* the DAR and the DSISR are only relevant during a data or
+		 * instruction storage interrupt; clear them here. */
+		regs->dar = 0;
+		regs->dsisr = 0;
+		regs->result = (u64)(saved_regs[PT_RESULT]) & 0xFFFFFFFF;
+
+		if (copy_from_user(current->thread.fpr, &sr->fp_regs, sizeof(sr->fp_regs)))
+			goto badframe;
+
+		ret = regs->result;
+	} else {
+		/* More signals to go */
+		regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE32;
+		if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
+			goto badframe;
+		sr = (struct sigregs32 *)(u64)sigctx.regs;
+		regs->gpr[3] = ret = sigctx.signal;
+		regs->gpr[4] = (unsigned long) sc;
+		regs->link = (unsigned long) &sr->tramp;
+		regs->nip = sigctx.handler;
+
+		/* Chain the previous stack pointer into the new frame. */
+		if (get_user(prevsp, &sr->gp_regs[PT_R1])
+		    || put_user(prevsp, (unsigned int *) regs->gpr[1]))
+			goto badframe;
+	}
+
+	PPCDBG(PPCDBG_SIGNAL, "sys32_sigreturn - normal exit returning %ld - pid=%ld current=%lx comm=%s \n", ret, current->pid, current, current->comm);
+	return ret;
+
+badframe:
+	PPCDBG(PPCDBG_SYS32NI, "sys32_sigreturn - badframe - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	do_exit(SIGSEGV);
+}
+
+/*
+ * Set up a signal frame.
+ */
+/*
+ * Set up a (non-RT) signal frame for a 32-bit process.
+ *
+ * Copies the low 32 bits of the interrupted pt_regs, the FP state, and
+ * the sigreturn trampoline into the sigregs32 frame on the user stack,
+ * then rewrites regs so that returning to user space enters the handler.
+ *
+ * FIX: every corrupted "®s" token (HTML-entity mangling of "&regs")
+ * is restored to "&regs".
+ */
+static void
+setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
+	      unsigned int newsp)
+{
+	struct sigcontext32_struct *sc = (struct sigcontext32_struct *)(u64)newsp;
+
+	if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
+		goto badframe;
+	if (regs->msr & MSR_FP)
+		giveup_fpu(current);
+
+	/*
+	 * Copy the pt_regs contents from the kernel stack to the
+	 * elf_gregset_t32 on the user stack.  This narrows 64-bit register
+	 * values to 32 bits; the high halves are not needed since a 32-bit
+	 * application is running.  The values saved here are restored by
+	 * sys32_sigreturn.
+	 *
+	 * The "+1" selects the low-order 32-bit word of each big-endian
+	 * 64-bit register.
+	 */
+	if (__copy_to_user(&frame->gp_regs[0], (u32*)(&regs->gpr[0])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[1], (u32*)(&regs->gpr[1])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[2], (u32*)(&regs->gpr[2])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[3], (u32*)(&regs->gpr[3])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[4], (u32*)(&regs->gpr[4])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[5], (u32*)(&regs->gpr[5])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[6], (u32*)(&regs->gpr[6])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[7], (u32*)(&regs->gpr[7])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[8], (u32*)(&regs->gpr[8])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[9], (u32*)(&regs->gpr[9])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[10], (u32*)(&regs->gpr[10])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[11], (u32*)(&regs->gpr[11])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[12], (u32*)(&regs->gpr[12])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[13], (u32*)(&regs->gpr[13])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[14], (u32*)(&regs->gpr[14])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[15], (u32*)(&regs->gpr[15])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[16], (u32*)(&regs->gpr[16])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[17], (u32*)(&regs->gpr[17])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[18], (u32*)(&regs->gpr[18])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[19], (u32*)(&regs->gpr[19])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[20], (u32*)(&regs->gpr[20])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[21], (u32*)(&regs->gpr[21])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[22], (u32*)(&regs->gpr[22])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[23], (u32*)(&regs->gpr[23])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[24], (u32*)(&regs->gpr[24])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[25], (u32*)(&regs->gpr[25])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[26], (u32*)(&regs->gpr[26])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[27], (u32*)(&regs->gpr[27])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[28], (u32*)(&regs->gpr[28])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[29], (u32*)(&regs->gpr[29])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[30], (u32*)(&regs->gpr[30])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[31], (u32*)(&regs->gpr[31])+1, sizeof(u32)))
+		goto badframe;
+
+	/*
+	 * Copy the non-gpr registers to the user stack.
+	 * NOTE(review): indexing gpr[] with PT_NIP etc. (>= 32) deliberately
+	 * reaches past the gpr array into the following pt_regs fields —
+	 * it relies on pt_regs being laid out as consecutive u64s in PT_*
+	 * order; confirm against asm/ptrace.h before changing.
+	 */
+	if (__copy_to_user(&frame->gp_regs[PT_NIP], (u32*)(&regs->gpr[PT_NIP])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[PT_MSR], (u32*)(&regs->gpr[PT_MSR])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[PT_ORIG_R3], (u32*)(&regs->gpr[PT_ORIG_R3])+1,
+			      sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[PT_CTR], (u32*)(&regs->gpr[PT_CTR])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[PT_LNK], (u32*)(&regs->gpr[PT_LNK])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[PT_XER], (u32*)(&regs->gpr[PT_XER])+1, sizeof(u32))
+	    || __copy_to_user(&frame->gp_regs[PT_CCR], (u32*)(&regs->gpr[PT_CCR])+1, sizeof(u32))
+# if 0
+	    || __copy_to_user(&frame->gp_regs[PT_MQ], (u32*)(&regs->gpr[PT_MQ])+1, sizeof(u32))
+#endif
+	    || __copy_to_user(&frame->gp_regs[PT_RESULT], (u32*)(&regs->gpr[PT_RESULT])+1,
+			      sizeof(u32)))
+		goto badframe;
+
+
+	/*
+	 * Now copy the floating point registers onto the user stack, and
+	 * install the trampoline so that when the handler returns,
+	 * sys_sigreturn gets control to unwind the frame.
+	 */
+	if (__copy_to_user(&frame->fp_regs, current->thread.fpr,
+			   ELF_NFPREG * sizeof(double))
+	    || __put_user(0x38000000U + __NR_sigreturn, &frame->tramp[0])	/* li r0, __NR_sigreturn */
+	    || __put_user(0x44000002U, &frame->tramp[1]))			/* sc */
+		goto badframe;
+
+	/* The trampoline is executed from the user stack: flush I-cache. */
+	flush_icache_range((unsigned long) &frame->tramp[0],
+			   (unsigned long) &frame->tramp[2]);
+
+	newsp -= __SIGNAL_FRAMESIZE32;
+	/* Store the back-chain and fetch handler/signal from the sigcontext. */
+	if (put_user(regs->gpr[1], (u32*)(u64)newsp)
+	    || get_user(regs->nip, &sc->handler)
+	    || get_user(regs->gpr[3], &sc->signal))
+		goto badframe;
+
+	regs->gpr[1] = newsp & 0xFFFFFFFF;
+	/* first parameter to the signal handler is the signal number
+	 *  - the value is in gpr3
+	 * second parameter to the signal handler is the sigcontext
+	 *  - set the value into gpr4
+	 */
+	regs->gpr[4] = (unsigned long) sc;
+	regs->link = (unsigned long) frame->tramp;
+	return;
+
+ badframe:
+	udbg_printf("setup_frame32 - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER();
+#if DEBUG_SIG
+	printk("badframe in setup_frame32, regs=%p frame=%p newsp=%lx\n",
+	       regs, frame, newsp);
+#endif
+	do_exit(SIGSEGV);
+}
+
+
+/****************************************************************************/
+/* Start of RT signal support */
+/* */
+/* sigset_t is 64 bits for rt signals */
+/* */
+/* System Calls */
+/* sigaction sys32_rt_sigaction */
+/* sigpending sys32_rt_sigpending */
+/* sigprocmask sys32_rt_sigprocmask */
+/* sigreturn sys32_rt_sigreturn */
+/* sigtimedwait sys32_rt_sigtimedwait */
+/* sigqueueinfo sys32_rt_sigqueueinfo */
+/* sigsuspend sys32_rt_sigsuspend */
+/* */
+/* Other routines */
+/* setup_rt_frame32 */
+/* siginfo64to32 */
+/* siginfo32to64 */
+/* */
+/* */
+/****************************************************************************/
+
+
+// This code executes after the rt signal handler in 32 bit mode has completed and
+// returned
+/*
+ * RT sigreturn for 32-bit processes: executes after a 32-bit rt signal
+ * handler has returned via the trampoline.  Pops one rt_sigframe_32 off
+ * the user stack, restores the signal mask, and either restores the
+ * interrupted register image (last frame) or sets up the next handler.
+ *
+ * FIXES: the corrupted "¤t" tokens (HTML-entity mangling of "&current")
+ * are restored to "&current", and the previously unchecked get_user()
+ * calls for pinfo/puc now bail to badframe on fault.
+ */
+long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+			unsigned long r6, unsigned long r7, unsigned long r8,
+			struct pt_regs *regs)
+{
+	struct rt_sigframe_32 *rt_stack_frame;
+	struct sigcontext32_struct sigctx;
+	struct sigregs32 *signalregs;
+
+	int ret;
+	elf_gregset_t32 saved_regs;	/* an array of 32 bit register values */
+	sigset_t signal_set;
+	stack_t stack;			/* copied for validation; not otherwise used here */
+	unsigned int previous_stack;
+
+	ret = 0;
+	/* Adjust the inputted reg1 to point to the first rt signal frame */
+	rt_stack_frame = (struct rt_sigframe_32 *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
+	/* Copy the information from the user stack  */
+	if (copy_from_user(&sigctx, &rt_stack_frame->uc.uc_mcontext, sizeof(sigctx))
+	    || copy_from_user(&signal_set, &rt_stack_frame->uc.uc_sigmask, sizeof(signal_set))
+	    || copy_from_user(&stack, &rt_stack_frame->uc.uc_stack, sizeof(stack)))
+	{
+		/* unable to copy from user storage */
+		goto badframe;
+	}
+
+	/* Unblock the signal that was processed
+	 *   After a signal handler runs -
+	 *     if the signal is blockable - the signal will be unblocked
+	 *     (sigkill and sigstop are not blockable)
+	 */
+	sigdelsetmask(&signal_set, ~_BLOCKABLE);
+	/* update the current based on the sigmask found in the rt_stackframe */
+	spin_lock_irq(&current->sigmask_lock);
+	current->blocked = signal_set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sigmask_lock);
+
+	/* Set to point to the next rt_sigframe - this is used to determine whether this
+	 * is the last signal to process
+	 */
+	rt_stack_frame++;
+
+	if (rt_stack_frame == (struct rt_sigframe_32 *)(u64)(sigctx.regs))
+	{
+		signalregs = (struct sigregs32 *)(u64)sigctx.regs;
+		/* If currently owning the floating point - give them up */
+		if (regs->msr & MSR_FP)
+		{
+			giveup_fpu(current);
+		}
+		if (copy_from_user(saved_regs, &signalregs->gp_regs, sizeof(signalregs->gp_regs)))
+		{
+			goto badframe;
+		}
+		/*
+		 * The saved reg structure in the frame is an elf_gregset_t32:
+		 * a 32-bit image of the pt_regs stored on the kernel stack
+		 * when the interrupted system call was suspended for the
+		 * signal.  Entries use the same PT_* indices as pt_regs.
+		 */
+
+		/* Only the user-changeable MSR bits may come from the frame. */
+		saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
+			| (saved_regs[PT_MSR] & MSR_USERCHANGE);
+		regs->gpr[0] = (u64)(saved_regs[0]) & 0xFFFFFFFF;
+		regs->gpr[1] = (u64)(saved_regs[1]) & 0xFFFFFFFF;
+		/* Register 2 is the kernel toc - should be reset on any */
+		/* calls into the kernel                                 */
+		regs->gpr[2] = (u64)(saved_regs[2]) & 0xFFFFFFFF;
+
+		regs->gpr[3] = (u64)(saved_regs[3]) & 0xFFFFFFFF;
+		regs->gpr[4] = (u64)(saved_regs[4]) & 0xFFFFFFFF;
+		regs->gpr[5] = (u64)(saved_regs[5]) & 0xFFFFFFFF;
+		regs->gpr[6] = (u64)(saved_regs[6]) & 0xFFFFFFFF;
+		regs->gpr[7] = (u64)(saved_regs[7]) & 0xFFFFFFFF;
+		regs->gpr[8] = (u64)(saved_regs[8]) & 0xFFFFFFFF;
+		regs->gpr[9] = (u64)(saved_regs[9]) & 0xFFFFFFFF;
+		regs->gpr[10] = (u64)(saved_regs[10]) & 0xFFFFFFFF;
+		regs->gpr[11] = (u64)(saved_regs[11]) & 0xFFFFFFFF;
+		regs->gpr[12] = (u64)(saved_regs[12]) & 0xFFFFFFFF;
+		regs->gpr[13] = (u64)(saved_regs[13]) & 0xFFFFFFFF;
+		regs->gpr[14] = (u64)(saved_regs[14]) & 0xFFFFFFFF;
+		regs->gpr[15] = (u64)(saved_regs[15]) & 0xFFFFFFFF;
+		regs->gpr[16] = (u64)(saved_regs[16]) & 0xFFFFFFFF;
+		regs->gpr[17] = (u64)(saved_regs[17]) & 0xFFFFFFFF;
+		regs->gpr[18] = (u64)(saved_regs[18]) & 0xFFFFFFFF;
+		regs->gpr[19] = (u64)(saved_regs[19]) & 0xFFFFFFFF;
+		regs->gpr[20] = (u64)(saved_regs[20]) & 0xFFFFFFFF;
+		regs->gpr[21] = (u64)(saved_regs[21]) & 0xFFFFFFFF;
+		regs->gpr[22] = (u64)(saved_regs[22]) & 0xFFFFFFFF;
+		regs->gpr[23] = (u64)(saved_regs[23]) & 0xFFFFFFFF;
+		regs->gpr[24] = (u64)(saved_regs[24]) & 0xFFFFFFFF;
+		regs->gpr[25] = (u64)(saved_regs[25]) & 0xFFFFFFFF;
+		regs->gpr[26] = (u64)(saved_regs[26]) & 0xFFFFFFFF;
+		regs->gpr[27] = (u64)(saved_regs[27]) & 0xFFFFFFFF;
+		regs->gpr[28] = (u64)(saved_regs[28]) & 0xFFFFFFFF;
+		regs->gpr[29] = (u64)(saved_regs[29]) & 0xFFFFFFFF;
+		regs->gpr[30] = (u64)(saved_regs[30]) & 0xFFFFFFFF;
+		regs->gpr[31] = (u64)(saved_regs[31]) & 0xFFFFFFFF;
+		/* restore the non gpr registers */
+		regs->msr = (u64)(saved_regs[PT_MSR]) & 0xFFFFFFFF;
+
+		regs->nip = (u64)(saved_regs[PT_NIP]) & 0xFFFFFFFF;
+		regs->orig_gpr3 = (u64)(saved_regs[PT_ORIG_R3]) & 0xFFFFFFFF;
+		regs->ctr = (u64)(saved_regs[PT_CTR]) & 0xFFFFFFFF;
+		regs->link = (u64)(saved_regs[PT_LNK]) & 0xFFFFFFFF;
+		regs->xer = (u64)(saved_regs[PT_XER]) & 0xFFFFFFFF;
+		regs->ccr = (u64)(saved_regs[PT_CCR]) & 0xFFFFFFFF;
+		/* regs->softe is left unchanged (like MSR.EE) */
+		/* the DAR and the DSISR are only relevant during a data or
+		 * instruction storage interrupt; clear them here. */
+		regs->dar = 0;
+		regs->dsisr = 0;
+		regs->result = (u64)(saved_regs[PT_RESULT]) & 0xFFFFFFFF;
+		ret = regs->result;
+	}
+	else	/* more signals to go  */
+	{
+		udbg_printf("hey should not occur\n");
+		regs->gpr[1] = (u64)rt_stack_frame - __SIGNAL_FRAMESIZE32;
+		if (copy_from_user(&sigctx, &rt_stack_frame->uc.uc_mcontext, sizeof(sigctx)))
+		{
+			goto badframe;
+		}
+		signalregs = (struct sigregs32 *)(u64)sigctx.regs;
+		/* first parm to signal handler is the signal number */
+		regs->gpr[3] = ret = sigctx.signal;
+		/* second parm is a pointer to sig info; third parm is a
+		 * pointer to the ucontext.  FIX: check these reads. */
+		if (get_user(regs->gpr[4], &rt_stack_frame->pinfo)
+		    || get_user(regs->gpr[5], &rt_stack_frame->puc))
+		{
+			goto badframe;
+		}
+		/* fourth parm is the stack frame */
+		regs->gpr[6] = (u64)rt_stack_frame;
+		/* Set up link register to return to sigreturn when the */
+		/* signal handler completes                             */
+		regs->link = (u64)&signalregs->tramp;
+		/* Set next instruction to the start of the signal handler */
+		regs->nip = sigctx.handler;
+		/* Set the reg1 to look like a call to the signal handler */
+		if (get_user(previous_stack, &signalregs->gp_regs[PT_R1])
+		    || put_user(previous_stack, (unsigned long *)regs->gpr[1]))
+		{
+			goto badframe;
+		}
+
+	}
+
+	return ret;
+
+ badframe:
+	do_exit(SIGSEGV);
+}
+
+
+
+/*
+ * 32-bit compat entry for rt_sigaction(2).
+ * Widens the caller's sigset32_t (pairs of 32-bit words) into the
+ * kernel's 64-bit sigset, calls do_sigaction(), and narrows the old
+ * action back out.  Returns 0, -EINVAL for a bad sigsetsize, or -EFAULT.
+ */
+asmlinkage long sys32_rt_sigaction(int sig, const struct sigaction32 *act, struct sigaction32 *oact, size_t sigsetsize)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+	sigset32_t set32;
+
+	PPCDBG(PPCDBG_SIGNAL, "sys32_rt_sigaction - entered - sig=%x \n", sig);
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset32_t))
+		return -EINVAL;
+
+	if (act) {
+		/* NOTE(review): cast-as-lvalue get_user — relies on the old
+		 * GCC extension; confirm toolchain before changing. */
+		ret = get_user((long)new_ka.sa.sa_handler, &act->sa_handler);
+		ret |= __copy_from_user(&set32, &act->sa_mask,
+					sizeof(sigset32_t));
+		/* Deliberate fall-through: each case packs two 32-bit words
+		 * into one 64-bit sigset word, highest word first. */
+		switch (_NSIG_WORDS) {
+		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
+			| (((long)set32.sig[7]) << 32);
+		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
+			| (((long)set32.sig[5]) << 32);
+		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
+			| (((long)set32.sig[3]) << 32);
+		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
+			| (((long)set32.sig[1]) << 32);
+		}
+
+		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+
+		if (ret)
+			return -EFAULT;
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		/* Inverse conversion: split each 64-bit sigset word back
+		 * into two 32-bit words (fall-through intended). */
+		switch (_NSIG_WORDS) {
+		case 4:
+			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
+			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
+		case 3:
+			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
+			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
+		case 2:
+			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
+			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
+		case 1:
+			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
+			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
+		}
+		ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
+		ret |= __copy_to_user(&oact->sa_mask, &set32,
+				      sizeof(sigset32_t));
+		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+	}
+
+
+	PPCDBG(PPCDBG_SIGNAL, "sys32_rt_sigaction - exiting - sig=%x \n", sig);
+	return ret;
+}
+
+
+extern asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
+ size_t sigsetsize);
+
+/* Note: it is necessary to treat how as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* Note: it is necessary to treat how as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ *
+ * 32-bit compat entry for rt_sigprocmask(2): widen the 32-bit mask,
+ * forward to sys_rt_sigprocmask() under KERNEL_DS, narrow the old mask
+ * back out.  Returns 0, the forwarded error, or -EFAULT.
+ */
+asmlinkage long sys32_rt_sigprocmask(u32 how, sigset32_t *set, sigset32_t *oset, size_t sigsetsize)
+{
+	sigset_t s;
+	sigset32_t s32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	PPCDBG(PPCDBG_SIGNAL, "sys32_rt_sigprocmask - entered how=%x \n", (int)how);
+
+	if (set) {
+		if (copy_from_user (&s32, set, sizeof(sigset32_t)))
+			return -EFAULT;
+
+		/* Pack pairs of 32-bit words into 64-bit sigset words
+		 * (deliberate switch fall-through). */
+		switch (_NSIG_WORDS) {
+		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+		}
+	}
+
+	/* Run the 64-bit syscall against the kernel-resident sigset. */
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigprocmask((int)how, set ? &s : NULL, oset ? &s : NULL,
+				 sigsetsize);
+	set_fs (old_fs);
+	if (ret) return ret;
+	if (oset) {
+		/* Split 64-bit sigset words back into 32-bit pairs
+		 * (deliberate fall-through). */
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
+		if (copy_to_user (oset, &s32, sizeof(sigset32_t)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+
+extern asmlinkage long sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
+
+
+
+/*
+ * 32-bit compat entry for rt_sigpending: fetch the pending set via the
+ * native syscall (against a kernel copy, under KERNEL_DS), then convert
+ * the 64-bit sigset_t to the 32-bit two-words-per-longword layout.
+ */
+asmlinkage long sys32_rt_sigpending(sigset32_t *set, __kernel_size_t32 sigsetsize)
+{
+
+ sigset_t s;
+ sigset32_t s32;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_rt_sigpending(&s, sigsetsize);
+ set_fs (old_fs);
+ if (!ret) {
+ /* Split 64-bit sigset words into 32-bit halves; intentional
+ * fall-through covers all configured _NSIG_WORDS. */
+ switch (_NSIG_WORDS) {
+ case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+ case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+ case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+ case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+ }
+ if (copy_to_user (set, &s32, sizeof(sigset32_t)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+
+
+/*
+ * Convert a kernel 64-bit siginfo_t into the 32-bit user layout.
+ * Only the fields meaningful for the given signal number are copied;
+ * RT signals (>= SIGRTMIN) always carry pid/uid/si_int.  Returns d
+ * so the call can be nested inside copy_to_user().
+ */
+siginfo_t32 *
+siginfo64to32(siginfo_t32 *d, siginfo_t *s)
+{
+	memset (d, 0, sizeof(siginfo_t32));
+	d->si_signo = s->si_signo;
+	d->si_errno = s->si_errno;
+	d->si_code = s->si_code;
+	if (s->si_signo >= SIGRTMIN) {
+		d->si_pid = s->si_pid;
+		d->si_uid = s->si_uid;
+
+		d->si_int = s->si_int;
+	} else switch (s->si_signo) {
+	/* XXX: What about POSIX1.b timers */
+	case SIGCHLD:
+		d->si_pid = s->si_pid;
+		d->si_status = s->si_status;
+		d->si_utime = s->si_utime;
+		d->si_stime = s->si_stime;
+		break;
+	case SIGSEGV:
+	case SIGBUS:
+	case SIGFPE:
+	case SIGILL:
+		/* Fault address: truncate the 64-bit pointer to 32 bits. */
+		d->si_addr = (long)(s->si_addr);
+		break;
+	case SIGPOLL:
+		d->si_band = s->si_band;
+		d->si_fd = s->si_fd;
+		break;
+	default:
+		d->si_pid = s->si_pid;
+		d->si_uid = s->si_uid;
+		break;
+	}
+	return d;
+}
+
+extern asmlinkage long
+sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
+ const struct timespec *uts, size_t sigsetsize);
+
+/*
+ * 32-bit compat entry for rt_sigtimedwait.
+ *
+ * Converts the 32-bit sigset and (optional) timespec32 to their 64-bit
+ * forms, calls the native syscall under KERNEL_DS, and converts the
+ * delivered siginfo back to the 32-bit layout for the caller.
+ * Returns the signal number on success or a negative errno.
+ */
+asmlinkage long
+sys32_rt_sigtimedwait(sigset32_t *uthese, siginfo_t32 *uinfo,
+		      struct timespec32 *uts, __kernel_size_t32 sigsetsize)
+{
+	sigset_t s;
+	sigset32_t s32;
+	struct timespec t;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	siginfo_t info;
+	siginfo_t32 info32;
+
+	if (copy_from_user (&s32, uthese, sizeof(sigset32_t)))
+		return -EFAULT;
+	/* Merge 32-bit word pairs into 64-bit sigset words (fall-through). */
+	switch (_NSIG_WORDS) {
+	case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+	case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+	case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+	case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+	}
+	if (uts) {
+		/* Widening assignment converts the 32-bit fields. */
+		ret = get_user (t.tv_sec, &uts->tv_sec);
+		ret |= __get_user (t.tv_nsec, &uts->tv_nsec);
+		if (ret)
+			return -EFAULT;
+	}
+	set_fs (KERNEL_DS);
+	if (uts)
+	{
+		ret = sys_rt_sigtimedwait(&s, &info, &t, sigsetsize);
+	} else {
+		/* uts is NULL here; the cast just types the NULL argument. */
+		ret = sys_rt_sigtimedwait(&s, &info, (struct timespec *)uts, sigsetsize);
+	}
+
+	set_fs (old_fs);
+	if (ret >= 0 && uinfo) {
+		if (copy_to_user (uinfo, siginfo64to32(&info32, &info),
+				  sizeof(siginfo_t32)))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+
+
+/*
+ * Convert a 32-bit user siginfo_t32 into the kernel 64-bit siginfo_t.
+ * Inverse of siginfo64to32(); only signal-relevant fields are copied.
+ * NOTE(review): unlike siginfo64to32() this does not memset(d) first,
+ * so untouched fields keep whatever the caller left there — callers
+ * should pass a zeroed or fully-initialized destination.
+ */
+siginfo_t *
+siginfo32to64(siginfo_t *d, siginfo_t32 *s)
+{
+	d->si_signo = s->si_signo;
+	d->si_errno = s->si_errno;
+	d->si_code = s->si_code;
+	if (s->si_signo >= SIGRTMIN) {
+		d->si_pid = s->si_pid;
+		d->si_uid = s->si_uid;
+		d->si_int = s->si_int;
+
+	} else switch (s->si_signo) {
+	/* XXX: What about POSIX1.b timers */
+	case SIGCHLD:
+		d->si_pid = s->si_pid;
+		d->si_status = s->si_status;
+		d->si_utime = s->si_utime;
+		d->si_stime = s->si_stime;
+		break;
+	case SIGSEGV:
+	case SIGBUS:
+	case SIGFPE:
+	case SIGILL:
+		/* A() zero-extends the 32-bit user address to a pointer. */
+		d->si_addr = (void *)A(s->si_addr);
+		break;
+	case SIGPOLL:
+		d->si_band = s->si_band;
+		d->si_fd = s->si_fd;
+		break;
+	default:
+		d->si_pid = s->si_pid;
+		d->si_uid = s->si_uid;
+		break;
+	}
+	return d;
+}
+
+
+extern asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
+
+/* Note: it is necessary to treat pid and sig as unsigned ints,
+ * with the corresponding casts back to signed ints, to ensure that the
+ * proper conversion (sign extension) between the 32-bit register
+ * representation and the 64-bit register representation of a signed int
+ * is performed.
+ */
+/*
+ * 32-bit compat entry for rt_sigqueueinfo: widen the user's siginfo_t32
+ * to a 64-bit siginfo_t and forward to the native syscall under
+ * KERNEL_DS (the siginfo copy lives on the kernel stack).
+ */
+asmlinkage long sys32_rt_sigqueueinfo(u32 pid, u32 sig, siginfo_t32 *uinfo)
+{
+	siginfo_t info;
+	siginfo_t32 info32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	if (copy_from_user (&info32, uinfo, sizeof(siginfo_t32)))
+		return -EFAULT;
+	/* XXX: Is this correct? */
+	siginfo32to64(&info, &info32);
+
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, &info);
+	set_fs (old_fs);
+	return ret;
+}
+
+
+int do_signal(sigset_t *oldset, struct pt_regs *regs);
+/*
+ * 32-bit compat entry for rt_sigsuspend: install the caller's temporary
+ * signal mask (converted from the 32-bit layout), then sleep until a
+ * signal is delivered.  do_signal() restores the saved mask via the
+ * signal frame; on return the interrupted syscall result is -EINTR.
+ *
+ * Fix: "&current" had been corrupted to the mojibake "¤t"
+ * (HTML-entity mangling of the source); restored throughout.
+ */
+int sys32_rt_sigsuspend(sigset32_t* unewset, size_t sigsetsize, int p3, int p4, int p6, int p7, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ sigset32_t s32;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&s32, unewset, sizeof(s32)))
+ return -EFAULT;
+
+ /* Merge the 32-bit word pairs of the user sigset into 64-bit
+ * sigset_t words (low word first); fall-through is intentional. */
+ switch (_NSIG_WORDS)
+ {
+ case 4: newset.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+ case 3: newset.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+ case 2: newset.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+ case 1: newset.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+ }
+
+ /* SIGKILL/SIGSTOP can never be masked. */
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+
+ /* Pre-load the syscall return value seen after signal delivery. */
+ regs->gpr[3] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return regs->gpr[3];
+ }
+}
+
+
+
+
+
+
+
+
+/*
+ * Set up a rt signal frame.
+ */
+/*
+ * Set up a rt signal frame for a 32-bit process.
+ *
+ * Copies the low 32 bits of each 64-bit GPR (plus NIP/MSR/CTR/LNK/XER/
+ * CCR/etc.), the FP registers, and a two-instruction sigreturn
+ * trampoline onto the user stack at 'frame', then points the thread's
+ * registers at the handler described by the rt_sigframe_32 at 'newsp'.
+ *
+ * Fix: "&regs" had been corrupted to the mojibake "®s"
+ * (HTML-entity mangling of the source); restored throughout.
+ */
+static void
+setup_rt_frame32(struct pt_regs *regs, struct sigregs32 *frame,
+ unsigned int newsp)
+{
+ unsigned int copyreg4,copyreg5;
+ struct rt_sigframe_32 * rt_sf = (struct rt_sigframe_32 *) (u64)newsp;
+
+
+ if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto badframe;
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+ /***************************************************************/
+ /* */
+ /* Copy the register contents for the pt_regs structure on the */
+ /* kernel stack to the elf_gregset_t32 structure on the user */
+ /* stack. This is a copy of 64 bit register values to 32 bit */
+ /* register values. The high order 32 bits of the 64 bit */
+ /* registers are not needed since a 32 bit application is */
+ /* running and the saved registers are the contents of the */
+ /* user registers at the time of a system call. */
+ /* */
+ /* The values saved on the user stack will be restored into */
+ /* the registers during the signal return processing */
+ /* */
+ /* Note the +1 is needed in order to get the lower 32 bits */
+ /* of 64 bit register (big-endian: low half is second) */
+ /***************************************************************/
+ if (__copy_to_user(&frame->gp_regs[0], (u32*)(&regs->gpr[0])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[1], (u32*)(&regs->gpr[1])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[2], (u32*)(&regs->gpr[2])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[3], (u32*)(&regs->gpr[3])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[4], (u32*)(&regs->gpr[4])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[5], (u32*)(&regs->gpr[5])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[6], (u32*)(&regs->gpr[6])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[7], (u32*)(&regs->gpr[7])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[8], (u32*)(&regs->gpr[8])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[9], (u32*)(&regs->gpr[9])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[10], (u32*)(&regs->gpr[10])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[11], (u32*)(&regs->gpr[11])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[12], (u32*)(&regs->gpr[12])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[13], (u32*)(&regs->gpr[13])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[14], (u32*)(&regs->gpr[14])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[15], (u32*)(&regs->gpr[15])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[16], (u32*)(&regs->gpr[16])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[17], (u32*)(&regs->gpr[17])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[18], (u32*)(&regs->gpr[18])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[19], (u32*)(&regs->gpr[19])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[20], (u32*)(&regs->gpr[20])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[21], (u32*)(&regs->gpr[21])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[22], (u32*)(&regs->gpr[22])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[23], (u32*)(&regs->gpr[23])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[24], (u32*)(&regs->gpr[24])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[25], (u32*)(&regs->gpr[25])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[26], (u32*)(&regs->gpr[26])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[27], (u32*)(&regs->gpr[27])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[28], (u32*)(&regs->gpr[28])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[29], (u32*)(&regs->gpr[29])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[30], (u32*)(&regs->gpr[30])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[31], (u32*)(&regs->gpr[31])+1, sizeof(u32)))
+ goto badframe;
+
+ /*****************************************************************************/
+ /* Copy the non gpr registers to the user stack */
+ /* (PT_* constants index pt_regs as an array of 64-bit words) */
+ /*****************************************************************************/
+
+ if (__copy_to_user(&frame->gp_regs[PT_NIP], (u32*)(&regs->gpr[PT_NIP])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_MSR], (u32*)(&regs->gpr[PT_MSR])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_ORIG_R3], (u32*)(&regs->gpr[PT_ORIG_R3])+1,
+ sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_CTR], (u32*)(&regs->gpr[PT_CTR])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_LNK], (u32*)(&regs->gpr[PT_LNK])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_XER], (u32*)(&regs->gpr[PT_XER])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_CCR], (u32*)(&regs->gpr[PT_CCR])+1, sizeof(u32))
+ || __copy_to_user(&frame->gp_regs[PT_RESULT], (u32*)(&regs->gpr[PT_RESULT])+1,
+ sizeof(u32)))
+ goto badframe;
+
+
+ /*****************************************************************************/
+ /* Now copy the floating point registers onto the user stack */
+ /* */
+ /* Also set up so on the completion of the signal handler, the sys_sigreturn */
+ /* will get control to reset the stack */
+ /*****************************************************************************/
+
+
+ if (__copy_to_user(&frame->fp_regs, current->thread.fpr,
+ ELF_NFPREG * sizeof(double))
+ || __put_user(0x38000000U + __NR_rt_sigreturn, &frame->tramp[0]) /* li r0, __NR_rt_sigreturn */
+ || __put_user(0x44000002U, &frame->tramp[1])) /* sc */
+ goto badframe;
+
+ /* The trampoline is executed from user space: keep I-cache coherent. */
+ flush_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[2]);
+
+
+ /* Retrieve rt_sigframe from stack and
+ set up registers for signal handler
+ */
+ newsp -= __SIGNAL_FRAMESIZE32;
+
+
+ /* Store the back-chain pointer, then load handler address and the
+ * pinfo/puc arguments previously stored by handle_signal32(). */
+ if (put_user((u32)(regs->gpr[1]), (unsigned int *)(u64)newsp)
+ || get_user(regs->nip, &rt_sf->uc.uc_mcontext.handler)
+ || get_user(regs->gpr[3], &rt_sf->uc.uc_mcontext.signal)
+ || get_user(copyreg4, &rt_sf->pinfo)
+ || get_user(copyreg5, &rt_sf->puc))
+ goto badframe;
+
+ regs->gpr[4] = copyreg4;
+ regs->gpr[5] = copyreg5;
+
+
+ regs->gpr[1] = newsp;
+ regs->gpr[6] = (unsigned long) rt_sf;
+
+
+ /* Handler returns into the trampoline, which issues rt_sigreturn. */
+ regs->link = (unsigned long) frame->tramp;
+
+ return;
+
+
+ badframe:
+ udbg_printf("setup_frame32 - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER();
+#if DEBUG_SIG
+ printk("badframe in setup_frame32, regs=%p frame=%p newsp=%lx\n",
+ regs, frame, newsp);
+#endif
+ do_exit(SIGSEGV);
+}
+
+
+/*
+ * OK, we're invoking a handler
+ */
+/*
+ * OK, we're invoking a handler.
+ *
+ * Builds either an rt_sigframe_32 (SA_SIGINFO) or a plain
+ * sigcontext32 on the user stack at *newspp and records the handler
+ * bookkeeping.  'frame' is where the register preamble will be written
+ * by setup_[rt_]frame32() afterwards.
+ *
+ * Fixes:
+ *  - "&current" had been corrupted to the mojibake "¤t"; restored.
+ *  - SA_ONESHOT / SA_NODEFER handling was nested inside the
+ *    non-SA_SIGINFO branch, so RT handlers never had the signal added
+ *    to the blocked mask; hoisted so it applies to both frame types.
+ */
+static void
+handle_signal32(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
+ unsigned int *newspp, unsigned int frame)
+{
+ struct sigcontext32_struct *sc;
+ struct rt_sigframe_32 *rt_stack_frame;
+ siginfo_t32 siginfo32bit;
+
+ /* If we interrupted a restartable syscall the handler must see
+ * -EINTR unless SA_RESTART permits restarting it. */
+ if (regs->trap == 0x0C00 /* System Call! */
+ && ((int)regs->result == -ERESTARTNOHAND ||
+ ((int)regs->result == -ERESTARTSYS &&
+ !(ka->sa.sa_flags & SA_RESTART))))
+ regs->result = -EINTR;
+
+ /* Set up the signal frame */
+ /* Determine if a real time frame - siginfo required */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ {
+ siginfo64to32(&siginfo32bit,info);
+ *newspp -= sizeof(*rt_stack_frame);
+ rt_stack_frame = (struct rt_sigframe_32 *) (u64)(*newspp) ;
+
+ if (verify_area(VERIFY_WRITE, rt_stack_frame, sizeof(*rt_stack_frame)))
+ {
+ goto badframe;
+ }
+ if (__put_user((u32)(u64)ka->sa.sa_handler, &rt_stack_frame->uc.uc_mcontext.handler)
+ || __put_user((u32)(u64)&rt_stack_frame->info, &rt_stack_frame->pinfo)
+ || __put_user((u32)(u64)&rt_stack_frame->uc, &rt_stack_frame->puc)
+ /* put the siginfo on the user stack */
+ || __copy_to_user(&rt_stack_frame->info,&siginfo32bit,sizeof(siginfo32bit))
+ /* set the ucontext on the user stack */
+ || __put_user(0,&rt_stack_frame->uc.uc_flags)
+ || __put_user(0,&rt_stack_frame->uc.uc_link)
+ || __put_user(current->sas_ss_sp, &rt_stack_frame->uc.uc_stack.ss_sp)
+ || __put_user(sas_ss_flags(regs->gpr[1]),
+ &rt_stack_frame->uc.uc_stack.ss_flags)
+ || __put_user(current->sas_ss_size, &rt_stack_frame->uc.uc_stack.ss_size)
+ || __copy_to_user(&rt_stack_frame->uc.uc_sigmask, oldset,sizeof(*oldset))
+ /* point the mcontext.regs to the preamble register frame */
+ || __put_user(frame, &rt_stack_frame->uc.uc_mcontext.regs)
+ || __put_user(sig,&rt_stack_frame->uc.uc_mcontext.signal))
+ {
+ goto badframe;
+ }
+ } else {
+ /* Put another sigcontext on the stack */
+ *newspp -= sizeof(*sc);
+ sc = (struct sigcontext32_struct *)(u64)*newspp;
+ if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
+ goto badframe;
+
+ /* Note the upper 32 bits of the signal mask are stored in the */
+ /* unused part of the signal stack frame */
+ if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler)
+ || __put_user(oldset->sig[0], &sc->oldmask)
+ || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
+ || __put_user((unsigned int)frame, &sc->regs)
+ || __put_user(sig, &sc->signal))
+ goto badframe;
+ }
+
+ /* Common post-frame bookkeeping (applies to rt and non-rt frames). */
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+ return;
+
+badframe:
+#if DEBUG_SIG
+ printk("badframe in handle_signal32, regs=%p frame=%lx newsp=%lx\n",
+ regs, frame, *newspp);
+ printk("sc=%p sig=%d ka=%p info=%p oldset=%p\n", sc, sig, ka, info, oldset);
+#endif
+ do_exit(SIGSEGV);
+}
+
+
+/****************************************************************************/
+/* Start Alternate signal stack support */
+/* */
+/* */
+/* */
+/* System Calls */
+/* sigaltstack sys32_sigaltstack */
+/* */
+/****************************************************************************/
+
+
+/*
+ * 32-bit compat entry for sigaltstack.  newstack/oldstack are 32-bit
+ * user addresses (passed as u32); they are widened, the stack_32_t
+ * contents copied into 64-bit stack_t locals, and do_sigaltstack() is
+ * run under KERNEL_DS against those locals.
+ */
+asmlinkage int sys32_sigaltstack(u32 newstack, u32 oldstack, int p3, int p4, int p6,
+ int p7, struct pt_regs *regs)
+{
+ stack_t uss, uoss;
+ int ret;
+ mm_segment_t old_fs;
+ unsigned long sp;
+
+ /* set sp to the user stack on entry to the system call */
+ /* the system call router sets R9 to the saved registers */
+ sp = regs->gpr[1];
+
+ /* Put new stack info in local 64 bit stack struct */
+ /* NOTE(review): the (long) cast on uss.ss_sp inside get_user looks
+ * intended to widen the 32-bit pointer — confirm it actually stores
+ * through to uss.ss_sp on this compiler. */
+ if (newstack && (get_user((long)uss.ss_sp, &((stack_32_t *)(long)newstack)->ss_sp) ||
+ __get_user(uss.ss_flags, &((stack_32_t *)(long)newstack)->ss_flags) ||
+ __get_user(uss.ss_size, &((stack_32_t *)(long)newstack)->ss_size)))
+ return -EFAULT;
+
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = do_sigaltstack(newstack ? &uss : NULL, oldstack ? &uoss : NULL, sp);
+ set_fs(old_fs);
+ /* Copy the stack information to the user output buffer */
+ if (!ret && oldstack && (put_user((long)uoss.ss_sp, &((stack_32_t *)(long)oldstack)->ss_sp) ||
+ __put_user(uoss.ss_flags, &((stack_32_t *)(long)oldstack)->ss_flags) ||
+ __put_user(uoss.ss_size, &((stack_32_t *)(long)oldstack)->ss_size)))
+ return -EFAULT;
+ return ret;
+}
+
+
+
+/****************************************************************************/
+/* Start of do_signal32 routine */
+/* */
+/* This routine gets control when a pending signal needs to be processed */
+/* in the 32 bit target thread - */
+/* */
+/* It handles both rt and non-rt signals */
+/* */
+/****************************************************************************/
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+
+/*
+ * Deliver one pending signal to a 32-bit process.
+ *
+ * Dequeues a signal, handles ptrace interception and default actions
+ * (stop, coredump, exit), and for a caught signal builds the user
+ * stack frame via handle_signal32() + setup_[rt_]frame32().  Restarts
+ * interrupted syscalls when no handler ran.  Returns 1 if a handler
+ * frame was set up, 0 otherwise.
+ *
+ * Fix: "&current" had been corrupted to the mojibake "¤t"
+ * (HTML-entity mangling of the source); restored throughout.
+ */
+int do_signal32(sigset_t *oldset, struct pt_regs *regs)
+{
+ siginfo_t info;
+ struct k_sigaction *ka;
+ unsigned int frame, newsp;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ newsp = frame = 0;
+
+ for (;;) {
+ unsigned long signr;
+
+ spin_lock_irq(&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq(&current->sigmask_lock);
+ ifppcdebug(PPCDBG_SYS32) {
+ if (signr)
+ udbg_printf("do_signal32 - processing signal=%2lx - pid=%ld, comm=%s \n", signr, current->pid, current->comm);
+ }
+
+ if (!signr)
+ break;
+
+ if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
+ if (do_coredump(signr, regs))
+ exit_code |= 0x80;
+ /* FALLTHRU */
+
+ default:
+ sig_exit(signr, exit_code, &info);
+ /* NOTREACHED */
+ }
+ }
+
+ PPCDBG(PPCDBG_SIGNAL, " do signal :sigaction flags = %lx \n" ,ka->sa.sa_flags);
+ PPCDBG(PPCDBG_SIGNAL, " do signal :on sig stack = %lx \n" ,on_sig_stack(regs->gpr[1]));
+ PPCDBG(PPCDBG_SIGNAL, " do signal :reg1 = %lx \n" ,regs->gpr[1]);
+ PPCDBG(PPCDBG_SIGNAL, " do signal :alt stack = %lx \n" ,current->sas_ss_sp);
+ PPCDBG(PPCDBG_SIGNAL, " do signal :alt stack size = %lx \n" ,current->sas_ss_size);
+
+
+ /* Use the alternate signal stack if requested and not
+ * already executing on it. */
+ if ( (ka->sa.sa_flags & SA_ONSTACK)
+ && (! on_sig_stack(regs->gpr[1])))
+ {
+ newsp = (current->sas_ss_sp + current->sas_ss_size);
+ } else
+ newsp = regs->gpr[1];
+ newsp = frame = newsp - sizeof(struct sigregs32);
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal32(signr, ka, &info, oldset, regs, &newsp, frame);
+ break;
+ }
+
+ if (regs->trap == 0x0C00 /* System Call! */ &&
+ ((int)regs->result == -ERESTARTNOHAND ||
+ (int)regs->result == -ERESTARTSYS ||
+ (int)regs->result == -ERESTARTNOINTR)) {
+ regs->gpr[3] = regs->orig_gpr3;
+ regs->nip -= 4; /* Back up & retry system call */
+ regs->result = 0;
+ }
+
+ /* newsp == frame means handle_signal32 pushed no extra context
+ * (or no signal was delivered at all). */
+ if (newsp == frame)
+ {
+ return 0; /* no signals delivered */
+ }
+ // Invoke correct stack setup routine
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame32(regs, (struct sigregs32*)(u64)frame, newsp);
+ else
+ setup_frame32(regs, (struct sigregs32*)(u64)frame, newsp);
+
+ return 1;
+
+}
--- /dev/null
+/*
+ *
+ *
+ * SMP support for ppc.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
+ * deal of code from the sparc and intel versions.
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ *
+ * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+#include <linux/init.h>
+/* #include <linux/openpic.h> */
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/hardirq.h>
+#include <asm/softirq.h>
+#include <asm/init.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/Naca.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/LparData.h>
+#include <asm/iSeries/HvCall.h>
+#include <asm/iSeries/HvCallCfg.h>
+#include <asm/time.h>
+#include <asm/ppcdebug.h>
+#include "open_pic.h"
+#include <asm/machdep.h>
+
+/* SMP bring-up state flags and per-CPU bookkeeping. */
+int smp_threads_ready = 0;
+volatile int smp_commenced = 0;
+int smp_num_cpus = 1;
+int smp_tb_synchronized = 0;
+spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
+static int max_cpus __initdata = NR_CPUS;
+
+unsigned long cpu_online_map;
+
+volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
+
+/* Timebase-synchronization scratch used only during boot. */
+#define TB_SYNC_PASSES 4
+volatile unsigned long __initdata tb_sync_flag = 0;
+volatile unsigned long __initdata tb_offset = 0;
+
+extern unsigned char stab_array[];
+
+int start_secondary(void *);
+extern int cpu_idle(void *unused);
+void smp_call_function_interrupt(void);
+void smp_message_pass(int target, int msg, unsigned long data, int wait);
+/* Per-CPU bitmask of pending IPI messages on iSeries. */
+static unsigned long iSeries_smp_message[NR_CPUS];
+extern struct Naca *naca;
+extern struct Paca xPaca[];
+
+void xics_setup_cpu(void);
+void xics_cause_IPI(int cpu);
+
+/*
+ * XICS only has a single IPI, so encode the messages per CPU
+ */
+volatile unsigned long xics_ipi_message[NR_CPUS] = {0};
+
+/* Route all message-pass calls through the platform hook. */
+#define smp_message_pass(t,m,d,w) ppc_md.smp_message_pass((t),(m),(d),(w))
+
+/* Write the 64-bit timebase as two 32-bit halves.  TBL is zeroed
+ * first so a carry out of the low half cannot corrupt TBU between
+ * the two writes. */
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+	mtspr(SPRN_TBWL, 0);
+	mtspr(SPRN_TBWU, upper);
+	mtspr(SPRN_TBWL, lower);
+}
+
+/* IPI receive path on iSeries: drain every message bit latched for
+ * this CPU by smp_iSeries_message_pass() and dispatch each one. */
+void iSeries_smp_message_recv( struct pt_regs * regs )
+{
+	int cpu = smp_processor_id();
+	int msg;
+
+	/* Nothing to do on a uniprocessor partition. */
+	if ( smp_num_cpus < 2 )
+		return;
+
+	/* Messages 0..3 match the PPC_MSG_* values. */
+	for ( msg = 0; msg < 4; ++msg )
+		if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
+			smp_message_recv( msg, regs );
+
+}
+
+/* Send an IPI message on iSeries: latch the message bit for each
+ * target CPU, then ask the hypervisor to interrupt it.  'data' and
+ * 'wait' are accepted for interface compatibility but unused. */
+static void smp_iSeries_message_pass(int target, int msg, unsigned long data, int wait)
+{
+	int i;
+	for (i = 0; i < smp_num_cpus; ++i) {
+		if ( (target == MSG_ALL) ||
+                     (target == i) ||
+                     ((target == MSG_ALL_BUT_SELF) && (i != smp_processor_id())) ) {
+			set_bit( msg, &iSeries_smp_message[i] );
+			HvCall_sendIPI(&(xPaca[i]));
+		}
+	}
+}
+
+/* Count the processors belonging to this partition: a Paca whose
+ * xDynProcStatus is < 2 represents a usable processor. */
+static int smp_iSeries_numProcs(void)
+{
+	unsigned np, i;
+	struct ItLpPaca * lpPaca;
+
+	np = 0;
+        for (i=0; i < maxPacas; ++i) {
+                lpPaca = xPaca[i].xLpPacaPtr;
+                if ( lpPaca->xDynProcStatus < 2 ) {
+                        ++np;
+                }
+        }
+	return np;
+}
+
+/* Probe hook for iSeries: count usable processors and seed each
+ * secondary's next_jiffy_update_tb from the boot CPU's value.  The
+ * hypervisor keeps timebases in sync, so mark them synchronized. */
+static int smp_iSeries_probe(void)
+{
+	unsigned i;
+	unsigned np;
+	struct ItLpPaca * lpPaca;
+
+	np = 0;
+	for (i=0; i < maxPacas; ++i) {
+		lpPaca = xPaca[i].xLpPacaPtr;
+		if ( lpPaca->xDynProcStatus < 2 ) {
+			++np;
+			xPaca[i].next_jiffy_update_tb = xPaca[0].next_jiffy_update_tb;
+		}
+	}
+
+	smp_tb_synchronized = 1;
+	return np;
+}
+
+/* Release secondary processor 'nr' on iSeries.  Silently returns if
+ * nr is out of range or not owned by this partition. */
+static void smp_iSeries_kick_cpu(int nr)
+{
+	struct ItLpPaca * lpPaca;
+	/* Verify we have a Paca for processor nr */
+	if ( ( nr <= 0 ) ||
+	     ( nr >= maxPacas ) )
+		return;
+	/* Verify that our partition has a processor nr */
+	lpPaca = xPaca[nr].xLpPacaPtr;
+	if ( lpPaca->xDynProcStatus >= 2 )
+		return;
+	/* The processor is currently spinning, waiting
+	 * for the xProcStart field to become non-zero
+	 * After we set xProcStart, the processor will
+	 * continue on to secondary_start in iSeries_head.S
+	 */
+	xPaca[nr].xProcStart = 1;
+}
+
+/* No per-CPU setup is required on iSeries; hook exists to satisfy
+ * the ppc_md.smp_setup_cpu interface. */
+static void smp_iSeries_setup_cpu(int nr)
+{
+}
+
+/* This is called very early. */
+/* This is called very early.  Installs the iSeries SMP hooks into
+ * ppc_md and records the partition's processor count. */
+void smp_init_iSeries(void)
+{
+	ppc_md.smp_message_pass = smp_iSeries_message_pass;
+	ppc_md.smp_probe        = smp_iSeries_probe;
+	ppc_md.smp_kick_cpu     = smp_iSeries_kick_cpu;
+	ppc_md.smp_setup_cpu    = smp_iSeries_setup_cpu;
+
+	naca->processorCount	= smp_iSeries_numProcs();
+}
+
+
+/* Send an IPI via OpenPIC.  Only messages 0..3 map to the four
+ * OpenPIC IPI sources; anything else is rejected with a log message.
+ * The second argument to openpic_cause_IPI is a CPU bitmask. */
+static void
+smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
+{
+	/* make sure we're sending something that translates to an IPI */
+	if ( msg > 0x3 ){
+		printk("SMP %d: smp_message_pass: unknown msg %d\n",
+		       smp_processor_id(), msg);
+		return;
+	}
+	switch ( target )
+	{
+	case MSG_ALL:
+		openpic_cause_IPI(msg, 0xffffffff);
+		break;
+	case MSG_ALL_BUT_SELF:
+		openpic_cause_IPI(msg,
+				  0xffffffff & ~(1 << smp_processor_id()));
+		break;
+	default:
+		openpic_cause_IPI(msg, 1<<target);
+		break;
+	}
+}
+
+/* Probe hook for CHRP/pSeries with OpenPIC: reserve the IPI vectors
+ * when more than one processor is present, and report the count. */
+static int
+smp_chrp_probe(void)
+{
+	if (naca->processorCount > 1)
+		openpic_request_IPIs();
+
+	return naca->processorCount;
+}
+
+/* Release secondary processor 'nr' (pSeries variant — no partition
+ * ownership check, unlike smp_iSeries_kick_cpu). */
+static void
+smp_kick_cpu(int nr)
+{
+	/* Verify we have a Paca for processor nr */
+	if ( ( nr <= 0 ) ||
+	     ( nr >= maxPacas ) )
+		return;
+
+	/* The processor is currently spinning, waiting
+	 * for the xProcStart field to become non-zero
+	 * After we set xProcStart, the processor will
+	 * continue on to secondary_start in iSeries_head.S
+	 */
+	xPaca[nr].xProcStart = 1;
+}
+
+extern struct gettimeofday_struct do_gtod;
+
+/* Stagger each CPU's next decrementer-driven jiffy update across the
+ * jiffy interval so all CPUs do not take timer work simultaneously. */
+static void smp_space_timers( unsigned nr )
+{
+	unsigned long offset, i;
+
+	offset = tb_ticks_per_jiffy / nr;
+	for ( i=1; i<nr; ++i ) {
+		xPaca[i].next_jiffy_update_tb = xPaca[i-1].next_jiffy_update_tb + offset;
+	}
+}
+
+/*
+ * Per-CPU setup on CHRP/pSeries, including timebase synchronization.
+ *
+ * On a logical partition (pSeriesLP) the hypervisor already keeps the
+ * timebases synchronized.  Otherwise CPU 0 coordinates with the
+ * secondaries through the 'ready'/'frozen' variables: freeze the
+ * timebase via RTAS, have every CPU zero its TB, then thaw.  The
+ * statement order and barriers here are the protocol — do not reorder.
+ */
+static void
+smp_chrp_setup_cpu(int cpu_nr)
+{
+	/* NOTE(review): 'ready' starts at 1, apparently counting the boot
+	 * CPU itself; each secondary increments it twice (before and
+	 * after the freeze window). */
+	static atomic_t ready = ATOMIC_INIT(1);
+	static volatile int frozen = 0;
+
+	if (_machine == _MACH_pSeriesLP) {
+		/* timebases already synced under the hypervisor. */
+		xPaca[cpu_nr].next_jiffy_update_tb = tb_last_stamp = get_tb();
+		if (cpu_nr == 0) {
+			do_gtod.tb_orig_stamp = tb_last_stamp;
+			/* Should update do_gtod.stamp_xsec.
+			 * For now we leave it which means the time can be some
+			 * number of msecs off until someone does a settimeofday()
+			 */
+		}
+		smp_tb_synchronized = 1;
+	} else {
+		if (cpu_nr == 0) {
+			/* wait for all the others */
+			while (atomic_read(&ready) < smp_num_cpus)
+				barrier();
+			atomic_set(&ready, 1);
+			/* freeze the timebase */
+			rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+			mb();
+			frozen = 1;
+			set_tb(0, 0);
+			xPaca[0].next_jiffy_update_tb = 0;
+			smp_space_timers(smp_num_cpus);
+			while (atomic_read(&ready) < smp_num_cpus)
+				barrier();
+			/* thaw the timebase again */
+			rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+			mb();
+			frozen = 0;
+			tb_last_stamp = get_tb();
+			do_gtod.tb_orig_stamp = tb_last_stamp;
+			smp_tb_synchronized = 1;
+		} else {
+			/* Secondary: check in, zero TB while frozen, check out. */
+			atomic_inc(&ready);
+			while (!frozen)
+				barrier();
+			set_tb(0, 0);
+			mb();
+			atomic_inc(&ready);
+			while (frozen)
+				barrier();
+		}
+	}
+
+	/* Interrupt controller per-CPU init: OpenPIC if present, else
+	 * XICS (CPU 0's XICS setup happens elsewhere). */
+	if (OpenPIC_Addr) {
+		do_openpic_setup_cpu();
+	} else {
+	  if (cpu_nr > 0)
+	    xics_setup_cpu();
+	}
+}
+
+/* Send an IPI via XICS.  XICS has a single IPI source, so the message
+ * number is latched in xics_ipi_message[cpu] before raising the IPI;
+ * the mb() orders the bit set before the interrupt is caused. */
+static void
+smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
+{
+	int i;
+
+	for (i = 0; i < smp_num_cpus; ++i) {
+		if (target == MSG_ALL || target == i
+		    || (target == MSG_ALL_BUT_SELF
+			&& i != smp_processor_id())) {
+			set_bit(msg, &xics_ipi_message[i]);
+			mb();
+			xics_cause_IPI(i);
+		}
+	}
+}
+
+/* Probe hook for XICS systems: no IPI reservation needed, just
+ * report the processor count. */
+static int
+smp_xics_probe(void)
+{
+	return naca->processorCount;
+}
+
+/* This is called very early */
+void smp_init_pSeries(void)
+{
+ if(naca->interrupt_controller == IC_OPEN_PIC) {
+ ppc_md.smp_message_pass = smp_openpic_message_pass;
+ ppc_md.smp_probe = smp_chrp_probe;
+ ppc_md.smp_kick_cpu = smp_kick_cpu;
+ ppc_md.smp_setup_cpu = smp_chrp_setup_cpu;
+ } else {
+ ppc_md.smp_message_pass = smp_xics_message_pass;
+ ppc_md.smp_probe = smp_xics_probe;
+ ppc_md.smp_kick_cpu = smp_kick_cpu;
+ ppc_md.smp_setup_cpu = smp_chrp_setup_cpu;
+ }
+}
+
+
+/* Per-CPU timer tick: run process accounting only every
+ * prof_multiplier ticks, counted down in the paca. */
+void smp_local_timer_interrupt(struct pt_regs * regs)
+{
+	if (!--(get_paca()->prof_counter)) {
+		update_process_times(user_mode(regs));
+		(get_paca()->prof_counter)=get_paca()->prof_multiplier;
+	}
+}
+
+static spinlock_t migration_lock = SPIN_LOCK_UNLOCKED;
+static task_t *new_task;
+
+/*
+ * This function sends a 'task migration' IPI to another CPU.
+ * Must be called from syscall contexts, with interrupts *enabled*.
+ */
+/*
+ * This function sends a 'task migration' IPI to another CPU.
+ * Must be called from syscall contexts, with interrupts *enabled*.
+ *
+ * Locking is deliberately asymmetric: migration_lock is taken here
+ * and released by the target CPU in smp_task_migration_interrupt(),
+ * which also serializes use of the shared 'new_task' slot.
+ */
+void smp_migrate_task(int cpu, task_t *p)
+{
+	/*
+	 * The target CPU will unlock the migration spinlock:
+	 */
+	spin_lock(&migration_lock);
+	new_task = p;
+
+	smp_message_pass(cpu, PPC_MSG_MIGRATE_TASK, 0, 0);
+}
+
+/*
+ * Task migration callback.
+ */
+/*
+ * Task migration callback — runs on the IPI target CPU.  Reads the
+ * task deposited by smp_migrate_task() and releases the lock that
+ * the sender acquired.
+ */
+static void smp_task_migration_interrupt(void)
+{
+	task_t *p;
+
+	/* Should we ACK the IPI interrupt early? */
+	p = new_task;
+	spin_unlock(&migration_lock);
+	sched_task_migrated(p);
+}
+
+/* Dispatch a received IPI message to its handler.  Called from the
+ * platform-specific IPI receive paths with the message number already
+ * decoded. */
+void smp_message_recv(int msg, struct pt_regs *regs)
+{
+	switch( msg ) {
+	case PPC_MSG_CALL_FUNCTION:
+		smp_call_function_interrupt();
+		break;
+	case PPC_MSG_RESCHEDULE:
+		/* XXX Do we have to do this? */
+		set_need_resched();
+		break;
+	case PPC_MSG_MIGRATE_TASK:
+		smp_task_migration_interrupt();
+		break;
+#ifdef CONFIG_XMON
+	case PPC_MSG_XMON_BREAK:
+		xmon(regs);
+		break;
+#endif /* CONFIG_XMON */
+	default:
+		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
+		       smp_processor_id(), msg);
+		break;
+	}
+}
+
+/* Ask CPU `cpu' to reschedule by sending it a reschedule IPI. */
+void smp_send_reschedule(int cpu)
+{
+	/*
+	 * This is only used if `cpu' is running an idle task,
+	 * so it will reschedule itself anyway...
+	 *
+	 * This isn't the case anymore since the other CPU could be
+	 * sleeping and won't reschedule until the next interrupt (such
+	 * as the timer).
+	 *  -- Cort
+	 */
+	/* This is only used if `cpu' is running an idle task,
+	   so it will reschedule itself anyway... */
+	smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
+}
+
+/*
+ * this function sends a reschedule IPI to all (other) CPUs.
+ * This should only be used if some 'global' task became runnable,
+ * such as a RT task, that must be handled now. The first CPU
+ * that manages to grab the task will run it.
+ */
+/* Broadcast a reschedule IPI to every other CPU (see comment above). */
+void smp_send_reschedule_all(void)
+{
+	smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_RESCHEDULE, 0, 0);
+}
+
+#ifdef CONFIG_XMON
+/* Send CPU `cpu' into the xmon debugger via IPI (CONFIG_XMON only). */
+void smp_send_xmon_break(int cpu)
+{
+	smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0);
+}
+#endif /* CONFIG_XMON */
+
+/* smp_call_function target: disable local interrupts and spin forever,
+ * taking this CPU out of service.  Never returns. */
+static void stop_this_cpu(void *dummy)
+{
+	__cli();
+	while (1)
+		;
+}
+
+/* Halt every other CPU (e.g. for panic/shutdown) and drop back to a
+ * single-CPU view of the machine. */
+void smp_send_stop(void)
+{
+	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_num_cpus = 1;
+}
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ * Stolen from the i386 version.
+ */
+/* Serializes use of the single shared call_data slot below. */
+static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+
+static struct call_data_struct {
+	void (*func) (void *info);	/* function to run on each CPU */
+	void *info;			/* opaque argument passed to func */
+	atomic_t started;		/* CPUs that have begun executing func */
+	atomic_t finished;		/* CPUs that have completed func */
+	int wait;			/* caller waits for 'finished' if set */
+} *call_data;
+
+/*
+ * This function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ *
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <nonatomic> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler, you may call it from a bottom half handler.
+ */
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+			int wait)
+
+{
+ struct call_data_struct data;
+ int ret = -1, cpus = smp_num_cpus-1;
+ int timeout;
+
+ /* Nothing to do on a uniprocessor. */
+ if (!cpus)
+ return 0;
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ spin_lock_bh(&call_lock);
+ call_data = &data;
+ /* Send a message to all other CPUs and wait for them to respond */
+ smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);
+
+ /* Wait for response */
+ timeout = 8000000;
+ while (atomic_read(&data.started) != cpus) {
+ HMT_low();
+ if (--timeout == 0) {
+ printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
+ smp_processor_id(), atomic_read(&data.started));
+#ifdef CONFIG_XMON
+ /* NOTE(review): passes NULL pt_regs to xmon -- confirm xmon tolerates that. */
+ xmon(0);
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ HvCall_terminateMachineSrc();
+#endif
+ goto out;
+ }
+ barrier();
+ udelay(1);
+ }
+
+ if (wait) {
+ timeout = 1000000;
+ while (atomic_read(&data.finished) != cpus) {
+ HMT_low();
+ if (--timeout == 0) {
+ printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
+ smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
+#ifdef CONFIG_PPC_ISERIES
+ HvCall_terminateMachineSrc();
+#endif
+ goto out;
+ }
+ barrier();
+ udelay(1);
+ }
+ }
+ ret = 0;
+
+ out:
+ HMT_medium();
+ spin_unlock_bh(&call_lock);
+ return ret;
+}
+
+/*
+ * IPI handler counterpart of smp_call_function(): snapshot the shared
+ * call_data, acknowledge via 'started', run the function, and (if the
+ * initiator is waiting) signal completion via 'finished'.
+ */
+void smp_call_function_interrupt(void)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ (*func)(info);
+ if (wait)
+ atomic_inc(&call_data->finished);
+}
+
+
+extern unsigned long decr_overclock;
+
+/* Per-CPU current-task pointer; slot 0 starts as the boot CPU's init task.
+ * NOTE(review): declared as thread_struct* but assigned thread_info pointers
+ * in smp_boot_cpus -- confirm intended type. */
+struct thread_struct *current_set[NR_CPUS] = {&init_thread_union, 0};
+
+/*
+ * Bring up secondary CPUs: initialize each CPU's Paca (segment table,
+ * decrementer), probe the platform for CPUs, fork an idle task per
+ * secondary, kick it, and wait for its callin. Runs once on the boot CPU.
+ *
+ * Fix: '&regs' had been mangled into the HTML entity residue '(R)s'
+ * (&reg; + s) in the memset() and do_fork() calls below; restored.
+ */
+void __init smp_boot_cpus(void)
+{
+ struct Paca *paca;
+ int i, cpu_nr;
+ struct task_struct *p;
+
+ printk("Entering SMP Mode...\n");
+
+ smp_num_cpus = 1;
+ smp_store_cpu_info(0);
+ cpu_online_map = 1UL;
+
+ /*
+ * assume for now that the first cpu booted is
+ * cpu 0, the master -- Cort
+ */
+ cpu_callin_map[0] = 1;
+ /* XXX buggy - Anton */
+ current_thread_info()->cpu = 0;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ paca = &xPaca[i];
+ paca->prof_counter = 1;
+ paca->prof_multiplier = 1;
+ if(i != 0) {
+ /*
+ * Processor 0's segment table is statically
+ * initialized to real address 0x5000. The
+ * Other processor's tables are created and
+ * initialized here.
+ */
+ paca->xStab_data.virt = (unsigned long)&stab_array[PAGE_SIZE * (i-1)];
+ memset((void *)paca->xStab_data.virt, 0, PAGE_SIZE);
+ paca->xStab_data.real = __v2a(paca->xStab_data.virt);
+ paca->default_decr = tb_ticks_per_jiffy / decr_overclock;
+ }
+ }
+
+ /*
+ * XXX very rough, assumes 20 bus cycles to read a cache line,
+ * timebase increments every 4 bus cycles, 32kB L1 data cache.
+ */
+ cacheflush_time = 5 * 1024;
+ /* XXX - Fix - Anton */
+ cache_decay_ticks = 0;
+
+ /* Probe arch for CPUs */
+ cpu_nr = ppc_md.smp_probe();
+
+ printk("Probe found %d CPUs\n", cpu_nr);
+
+ /*
+ * only check for cpus we know exist. We keep the callin map
+ * with cpus at the bottom -- Cort
+ */
+ if (cpu_nr > max_cpus)
+ cpu_nr = max_cpus;
+
+#ifdef CONFIG_ISERIES
+ smp_space_timers( cpu_nr );
+#endif
+
+ printk("Waiting for %d CPUs\n", cpu_nr-1);
+
+ for ( i = 1 ; i < cpu_nr; i++ ) {
+ int c;
+ struct pt_regs regs;
+
+ /* create a process for the processor */
+ /* we don't care about the values in regs since we'll
+ never reschedule the forked task. */
+ /* We DO care about one bit in the pt_regs we
+ pass to do_fork. That is the MSR_FP bit in
+ regs.msr. If that bit is on, then do_fork
+ (via copy_thread) will call giveup_fpu.
+ giveup_fpu will get a pointer to our (current's)
+ last register savearea via current->thread.regs
+ and using that pointer will turn off the MSR_FP,
+ MSR_FE0 and MSR_FE1 bits. At this point, this
+ pointer is pointing to some arbitrary point within
+ our stack */
+
+ memset(&regs, 0, sizeof(struct pt_regs));
+
+ if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
+ panic("failed fork for CPU %d", i);
+ p = init_task.prev_task;
+ if (!p)
+ panic("No idle task for CPU %d", i);
+
+ init_idle(p, i);
+
+ unhash_process(p);
+
+ xPaca[i].xCurrent = (u64)p;
+ current_set[i] = p->thread_info;
+
+ /* wake up cpus */
+ ppc_md.smp_kick_cpu(i);
+
+ /*
+ * wait to see if the cpu made a callin (is actually up).
+ * use this value that I found through experimentation.
+ * -- Cort
+ */
+ for ( c = 5000; c && !cpu_callin_map[i] ; c-- ) {
+ udelay(100);
+ }
+
+ if ( cpu_callin_map[i] )
+ {
+ printk("Processor %d found.\n", i);
+ /* this sync's the decr's -- Cort */
+ smp_num_cpus++;
+ } else {
+ printk("Processor %d is stuck.\n", i);
+ }
+ }
+
+ /* Setup CPU 0 last (important) */
+ ppc_md.smp_setup_cpu(0);
+
+ if (smp_num_cpus < 2) {
+ tb_last_stamp = get_tb();
+ smp_tb_synchronized = 1;
+ }
+}
+
+void __init smp_commence(void)
+{
+ /*
+ * Lets the callin's below out of their loop.
+ */
+ PPCDBG(PPCDBG_SMP, "smp_commence: start\n");
+ /* wmb() orders prior setup stores before the release flag below. */
+ wmb();
+ smp_commenced = 1;
+}
+
+/*
+ * Executed by each secondary CPU after it wakes up: record CPU info,
+ * start its decrementer, announce itself via the callin/online maps,
+ * then spin until smp_commence() releases it and enable interrupts.
+ */
+void __init smp_callin(void)
+{
+ int cpu = smp_processor_id();
+
+ smp_store_cpu_info(cpu);
+ set_dec(xPaca[cpu].default_decr);
+ cpu_callin_map[cpu] = 1;
+ set_bit(smp_processor_id(), &cpu_online_map);
+
+ ppc_md.smp_setup_cpu(cpu);
+
+ while(!smp_commenced) {
+ barrier();
+ }
+ __sti();
+}
+
+/* intel needs this */
+/* Empty on PPC64; kept so generic SMP code has a symbol to call. */
+void __init initialize_secondary(void)
+{
+}
+
+/* Activate a secondary processor. */
+/* Entry point of each secondary's idle task: adopt init_mm, call in,
+ * then run the idle loop forever. */
+int start_secondary(void *unused)
+{
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ smp_callin();
+
+ return cpu_idle(NULL);
+}
+
+/* Legacy command-line hook; intentionally a no-op on this platform. */
+void __init smp_setup(char *str, int *ints)
+{
+}
+
+/* Profiling-timer multiplier is not supported here; always succeed. */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+/* this function is called for each processor
+ */
+/* Record the processor version register (PVR) in the CPU's Paca. */
+void __init smp_store_cpu_info(int id)
+{
+ xPaca[id].pvr = _get_PVR();
+}
+
+/* Parse the "maxcpus=N" boot option to cap how many CPUs are brought up. */
+static int __init maxcpus(char *str)
+{
+ get_option(&str, &max_cpus);
+ return 1;
+}
+
+__setup("maxcpus=", maxcpus);
--- /dev/null
+/*
+ * PowerPC64 Segment Translation Support.
+ *
+ * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
+ * Copyright (c) 2001 Dave Engebretsen
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/Paca.h>
+#include <asm/Naca.h>
+#include <asm/pmc.h>
+
+inline int make_ste(unsigned long stab,
+ unsigned long esid, unsigned long vsid);
+inline void make_slbe(unsigned long esid, unsigned long vsid,
+ int large);
+extern struct Naca *naca;
+
+/*
+ * Build an entry for the base kernel segment and put it into
+ * the segment table or SLB. All other segment table or SLB
+ * entries are faulted in.
+ */
+/* Pre-POWER4 CPUs use the segment table (STE); POWER4 uses the SLB. */
+void stab_initialize(unsigned long stab)
+{
+ unsigned long esid, vsid;
+
+ esid = GET_ESID(KERNELBASE);
+ vsid = get_kernel_vsid(esid << SID_SHIFT);
+
+ if (!__is_processor(PV_POWER4)) {
+ __asm__ __volatile__("isync; slbia; isync":::"memory");
+ make_ste(stab, esid, vsid);
+ } else {
+ /* Invalidate the entire SLB & all the ERATS */
+ __asm__ __volatile__("isync" : : : "memory");
+#ifndef CONFIG_PPC_ISERIES
+ __asm__ __volatile__("slbmte %0,%0"
+ : : "r" (0) : "memory");
+ __asm__ __volatile__("isync; slbia; isync":::"memory");
+ make_slbe(esid, vsid, 0);
+#else
+ __asm__ __volatile__("isync; slbia; isync":::"memory");
+#endif
+ }
+}
+
+/*
+ * Create a segment table entry for the given esid/vsid pair.
+ * Searches the primary then secondary hash group for a free slot;
+ * if both are full, casts out an existing entry round-robin.
+ * Returns the global entry number (group base | slot) that was used.
+ */
+inline int
+make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
+{
+ unsigned long entry, group, old_esid, castout_entry, i;
+ unsigned int global_entry;
+ STE *ste, *castout_ste;
+
+ /* Search the primary group first. */
+ global_entry = (esid & 0x1f) << 3;
+ ste = (STE *)(stab | ((esid & 0x1f) << 7));
+
+ /*
+ * Find an empty entry, if one exists.
+ */
+ for(group = 0; group < 2; group++) {
+ for(entry = 0; entry < 8; entry++, ste++) {
+ if(!(ste->dw0.dw0.v)) {
+ ste->dw1.dw1.vsid = vsid;
+ /* Order VSID update before setting the valid bit. */
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ ste->dw0.dw0.esid = esid;
+ ste->dw0.dw0.v = 1;
+ ste->dw0.dw0.kp = 1;
+ /* Order update */
+ __asm__ __volatile__ ("sync" : : : "memory");
+
+ return(global_entry | entry);
+ }
+ }
+ /* Now search the secondary group. */
+ global_entry = ((~esid) & 0x1f) << 3;
+ ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
+ }
+
+ /*
+ * Could not find empty entry, pick one with a round robin selection.
+ * Search all entries in the two groups. Note that the first time
+ * we get here, we start with entry 1 so the initializer
+ * can be common with the SLB castout code.
+ */
+
+ /* This assumes we never castout when initializing the stab. */
+ PMC_SW_PROCESSOR(stab_capacity_castouts);
+
+ castout_entry = get_paca()->xStab_data.next_round_robin;
+ for(i = 0; i < 16; i++) {
+ if(castout_entry < 8) {
+ global_entry = (esid & 0x1f) << 3;
+ ste = (STE *)(stab | ((esid & 0x1f) << 7));
+ castout_ste = ste + castout_entry;
+ } else {
+ global_entry = ((~esid) & 0x1f) << 3;
+ ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
+ castout_ste = ste + (castout_entry - 8);
+ }
+
+ /* Prefer to cast out user entries or secondary kernel segments. */
+ if((((castout_ste->dw0.dw0.esid) >> 32) == 0) ||
+ (((castout_ste->dw0.dw0.esid) & 0xffffffff) > 0)) {
+ /* Found an entry to castout. It is either a user */
+ /* region, or a secondary kernel segment. */
+ break;
+ }
+
+ castout_entry = (castout_entry + 1) & 0xf;
+ }
+
+ get_paca()->xStab_data.next_round_robin = (castout_entry + 1) & 0xf;
+
+ /* Modify the old entry to the new value. */
+
+ /* Force previous translations to complete. DRENG */
+ __asm__ __volatile__ ("isync" : : : "memory" );
+
+ castout_ste->dw0.dw0.v = 0;
+ __asm__ __volatile__ ("sync" : : : "memory" ); /* Order update */
+ castout_ste->dw1.dw1.vsid = vsid;
+ __asm__ __volatile__ ("eieio" : : : "memory" ); /* Order update */
+ old_esid = castout_ste->dw0.dw0.esid;
+ castout_ste->dw0.dw0.esid = esid;
+ castout_ste->dw0.dw0.v = 1;
+ castout_ste->dw0.dw0.kp = 1;
+ __asm__ __volatile__ ("slbie %0" : : "r" (old_esid << SID_SHIFT));
+ /* Ensure completion of slbie */
+ __asm__ __volatile__ ("sync" : : : "memory" );
+
+ return(global_entry | (castout_entry & 0x7));
+}
+
+/*
+ * Create a segment buffer entry for the given esid/vsid pair.
+ * Scans the SLB for an invalid slot first; otherwise evicts a slot
+ * chosen round-robin (slot 0, the bolted kernel entry, is never
+ * selected since the round-robin wraps back to 1).
+ */
+inline void make_slbe(unsigned long esid, unsigned long vsid, int large)
+{
+ unsigned long entry, castout_entry;
+ slb_dword0 castout_esid_data;
+ union {
+ unsigned long word0;
+ slb_dword0 data;
+ } esid_data;
+ union {
+ unsigned long word0;
+ slb_dword1 data;
+ } vsid_data;
+
+ /*
+ * Find an empty entry, if one exists.
+ */
+ for(entry = 0; entry < naca->slb_size; entry++) {
+ __asm__ __volatile__("slbmfee %0,%1"
+ : "=r" (esid_data) : "r" (entry));
+ if(!esid_data.data.v) {
+ /*
+ * Write the new SLB entry.
+ */
+ vsid_data.word0 = 0;
+ vsid_data.data.vsid = vsid;
+ vsid_data.data.kp = 1;
+ if (large)
+ vsid_data.data.l = 1;
+
+ esid_data.word0 = 0;
+ esid_data.data.esid = esid;
+ esid_data.data.v = 1;
+ esid_data.data.index = entry;
+
+ /* slbie not needed as no previous mapping existed. */
+ /* Order update */
+ __asm__ __volatile__ ("isync" : : : "memory");
+ __asm__ __volatile__ ("slbmte %0,%1"
+ : : "r" (vsid_data),
+ "r" (esid_data));
+ /* Order update */
+ __asm__ __volatile__ ("isync" : : : "memory");
+ return;
+ }
+ }
+
+ /*
+ * Could not find empty entry, pick one with a round robin selection.
+ */
+
+ PMC_SW_PROCESSOR(stab_capacity_castouts);
+
+ castout_entry = get_paca()->xStab_data.next_round_robin;
+ __asm__ __volatile__("slbmfee %0,%1"
+ : "=r" (castout_esid_data)
+ : "r" (castout_entry));
+
+ entry = castout_entry;
+ castout_entry++;
+ if(castout_entry >= naca->slb_size) {
+ castout_entry = 1;
+ }
+ get_paca()->xStab_data.next_round_robin = castout_entry;
+
+ /* Invalidate the old entry. */
+ castout_esid_data.v = 0; /* Set the class to 0 */
+ /* slbie not needed as the previous mapping is still valid. */
+ __asm__ __volatile__("slbie %0" : : "r" (castout_esid_data));
+
+ /*
+ * Write the new SLB entry.
+ */
+ vsid_data.word0 = 0;
+ vsid_data.data.vsid = vsid;
+ vsid_data.data.kp = 1;
+ if (large)
+ vsid_data.data.l = 1;
+
+ esid_data.word0 = 0;
+ esid_data.data.esid = esid;
+ esid_data.data.v = 1;
+ esid_data.data.index = entry;
+
+ __asm__ __volatile__ ("isync" : : : "memory"); /* Order update */
+ __asm__ __volatile__ ("slbmte %0,%1"
+ : : "r" (vsid_data), "r" (esid_data));
+ __asm__ __volatile__ ("isync" : : : "memory" ); /* Order update */
+}
+
+/*
+ * Allocate a segment table entry for the given ea.
+ * trap 0x380/0x480 (data/instruction segment faults on SLB machines)
+ * routes to make_slbe; anything else goes through the segment table
+ * and user entries are remembered in the per-CPU xSegments cache so
+ * flush_stab can invalidate them cheaply.
+ * Returns 0 on success, 1 for an invalid EA or missing mm.
+ */
+int ste_allocate ( unsigned long ea,
+		   unsigned long trap)
+{
+ unsigned long vsid, esid;
+ int kernel_segment = 0;
+
+ PMC_SW_PROCESSOR(stab_faults);
+
+ /* Check for invalid effective addresses. */
+ if (!IS_VALID_EA(ea)) {
+ return 1;
+ }
+
+ /* Kernel or user address? */
+ if (REGION_ID(ea) >= KERNEL_REGION_ID) {
+ kernel_segment = 1;
+ vsid = get_kernel_vsid( ea );
+ } else {
+ struct mm_struct *mm = current->mm;
+ if ( mm ) {
+ vsid = get_vsid(mm->context, ea );
+ } else {
+ return 1;
+ }
+ }
+
+ esid = GET_ESID(ea);
+ if (trap == 0x380 || trap == 0x480) {
+#ifndef CONFIG_PPC_ISERIES
+ /* Kernel region gets a large-page SLB entry on non-iSeries. */
+ if (REGION_ID(ea) == KERNEL_REGION_ID)
+ make_slbe(esid, vsid, 1);
+ else
+#endif
+ make_slbe(esid, vsid, 0);
+ } else {
+ unsigned char top_entry, stab_entry, *segments;
+
+ stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
+ PMC_SW_PROCESSOR_A(stab_entry_use, stab_entry & 0xf);
+
+ /* segments[0] is the count; 0xff marks cache overflow. */
+ segments = get_paca()->xSegments;
+ top_entry = segments[0];
+ if(!kernel_segment && top_entry < (STAB_CACHE_SIZE - 1)) {
+ top_entry++;
+ segments[top_entry] = stab_entry;
+ if(top_entry == STAB_CACHE_SIZE - 1) top_entry = 0xff;
+ segments[0] = top_entry;
+ }
+ }
+
+ return(0);
+}
+
+/*
+ * Flush all entries from the segment table of the current processor.
+ * Kernel and Bolted entries are not removed as we cannot tolerate
+ * faults on those addresses.
+ */
+
+/* Set to 1 to force full-table invalidation for stress testing. */
+#define STAB_PRESSURE 0
+
+void flush_stab(void)
+{
+ STE *stab = (STE *) get_paca()->xStab_data.virt;
+ unsigned char *segments = get_paca()->xSegments;
+ unsigned long flags, i;
+
+ if(!__is_processor(PV_POWER4)) {
+ unsigned long entry;
+ STE *ste;
+
+ /* Force previous translations to complete. DRENG */
+ __asm__ __volatile__ ("isync" : : : "memory");
+
+ __save_and_cli(flags);
+ /* segments[0]==0xff means the cache overflowed; fall back to a scan. */
+ if(segments[0] != 0xff && !STAB_PRESSURE) {
+ for(i = 1; i <= segments[0]; i++) {
+ ste = stab + segments[i];
+ ste->dw0.dw0.v = 0;
+ PMC_SW_PROCESSOR(stab_invalidations);
+ }
+ } else {
+ /* Invalidate all entries. */
+ ste = stab;
+
+ /* Never flush the first entry. */
+ ste += 1;
+ for(entry = 1;
+ entry < (PAGE_SIZE / sizeof(STE));
+ entry++, ste++) {
+ unsigned long ea;
+ ea = ste->dw0.dw0.esid << SID_SHIFT;
+ if (STAB_PRESSURE || ea < KERNELBASE) {
+ ste->dw0.dw0.v = 0;
+ PMC_SW_PROCESSOR(stab_invalidations);
+ }
+ }
+ }
+
+ /* Reset the segment cache (count and all cached slots). */
+ *((unsigned long *)segments) = 0;
+ __restore_flags(flags);
+
+ /* Invalidate the SLB. */
+ /* Force invals to complete. */
+ __asm__ __volatile__ ("sync" : : : "memory");
+ /* Flush the SLB. */
+ __asm__ __volatile__ ("slbia" : : : "memory");
+ /* Force flush to complete. */
+ __asm__ __volatile__ ("sync" : : : "memory");
+ } else {
+ unsigned long flags;
+
+ PMC_SW_PROCESSOR(stab_invalidations);
+
+ __save_and_cli(flags);
+ __asm__ __volatile__("isync; slbia; isync":::"memory");
+ __restore_flags(flags);
+ }
+}
--- /dev/null
+/*
+ * sys32.S: I-cache tricks for 32-bit compatability layer simple
+ * conversions.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 2000 Ken Aaker (kdaaker@rchland.vnet.ibm.com)
+ * For PPC ABI convention is parms in Regs 3-10.
+ * The router in entry.S clears the high 32 bits in the first
+ * 4 arguments (R3-R6).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ppc_asm.h"
+#include <asm/errno.h>
+#include <asm/processor.h>
+
+/* NOTE: call as jump breaks return stack, we have to avoid that */
+
+ .text
+
+/*
+ * 32-bit syscall shims: the entry router already zero-extended args
+ * r3-r6; each shim fixes up args 5+ (zero- or sign-extend as the 64-bit
+ * syscall expects) and tail-branches to the native handler.
+ */
+_GLOBAL(sys32_mmap)
+ clrldi r7, r7, 32 /* int fd parm */
+ clrldi r8, r8, 32 /* off_t offset parm */
+ b .sys_mmap
+
+_GLOBAL(sys32_lseek)
+ extsw r4,r4 /* sign extend off_t offset parm */
+ b .sys_lseek
+
+_GLOBAL(sys32_chmod)
+/* Ken Aaker.. hmmm maybe I don't need to do anything here */
+ b .sys_chmod
+
+_GLOBAL(sys32_mknod)
+/* Ken Aaker.. hmmm maybe I don't need to do anything here */
+ b .sys_mknod
+
+_GLOBAL(sys32_sendto)
+ clrldi r7, r7, 32 /* struct sockaddr *addr parm */
+ clrldi r8, r8, 32 /* int addr_len parm */
+ b .sys_sendto
+
+_GLOBAL(sys32_recvfrom)
+ clrldi r7, r7, 32 /* struct sockaddr *addr parm */
+ clrldi r8, r8, 32 /* int *addr_len parm */
+ b .sys_recvfrom
+
+_GLOBAL(sys32_getsockopt)
+ clrldi r7, r7, 32 /* int *optlen parm */
+ b .sys_getsockopt
+
+_GLOBAL(sys32_bdflush)
+ extsw r4,r4 /* sign extend long data parm */
+ b .sys_bdflush
+
+/* NOTE(review): mmap2 branches to sys_mmap -- confirm whether the page
+ * offset is converted to a byte offset somewhere, else this looks wrong. */
+_GLOBAL(sys32_mmap2)
+ clrldi r7, r7, 32 /* unsigned long fd parm */
+ extsw r8, r8 /* off_t offset */
+ b .sys_mmap
+
+/* Validate call number (1..17), then vector through socketcall_table. */
+_GLOBAL(sys32_socketcall) /* r3=call, r4=args */
+ cmpwi r3, 1
+ blt- .do_einval
+ cmpwi r3, 17
+ bgt- .do_einval
+ subi r3, r3, 1 /* index into socketcall_table vectors and jmp */
+ sldi r3, r3, 3 /* each entry is 8 bytes */
+ LOADADDR(r10,.socketcall_table_begin)
+ ldx r10, r10, r3
+ mtctr r10
+ bctr
+
+/* Socket function vectored fix ups for 32 bit */
+/*
+ * Each helper unpacks the 32-bit args array at r4 into registers:
+ * lwa sign-extends ints, lwz zero-extends user pointers/unsigned
+ * values, then tail-branches to the (possibly 32-bit-shimmed) handler.
+ * Faults on the user array are caught via the __ex_table entry below.
+ */
+_STATIC(do_sys_socket) /* sys_socket(int, int, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwa r4,4(r10)
+ lwa r5,8(r10)
+ b .sys_socket
+
+_STATIC(do_sys_bind) /* sys_bind(int fd, struct sockaddr *, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwa r5,8(r10)
+ b .sys_bind
+
+_STATIC(do_sys_connect) /* sys_connect(int, struct sockaddr *, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwa r5,8(r10)
+ b .sys_connect
+
+_STATIC(do_sys_listen) /* sys_listen(int, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwa r4,4(r10)
+ b .sys_listen
+
+_STATIC(do_sys_accept) /* sys_accept(int, struct sockaddr *, int *) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ b .sys_accept
+
+_STATIC(do_sys_getsockname) /* sys_getsockname(int, struct sockaddr *, int *) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ b .sys_getsockname
+
+_STATIC(do_sys_getpeername) /* sys_getpeername(int, struct sockaddr *, int *) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ b .sys_getpeername
+
+_STATIC(do_sys_socketpair) /* sys_socketpair(int, int, int, int *) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwa r4,4(r10)
+ lwa r5,8(r10)
+ lwz r6,12(r10)
+ b .sys_socketpair
+
+_STATIC(do_sys_send) /* sys_send(int, void *, size_t, unsigned int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ lwz r6,12(r10)
+ b .sys_send
+
+_STATIC(do_sys_recv) /* sys_recv(int, void *, size_t, unsigned int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ lwz r6,12(r10)
+ b .sys_recv
+
+_STATIC(do_sys_sendto) /* sys32_sendto(int, u32, __kernel_size_t32, unsigned int, u32, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ lwz r6,12(r10)
+ lwz r7,16(r10)
+ lwa r8,20(r10)
+ b .sys32_sendto
+
+_STATIC(do_sys_recvfrom) /* sys32_recvfrom(int, u32, __kernel_size_t32, unsigned int, u32, u32) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwz r5,8(r10)
+ lwz r6,12(r10)
+ lwz r7,16(r10)
+ lwz r8,20(r10)
+ b .sys32_recvfrom
+
+_STATIC(do_sys_shutdown) /* sys_shutdown(int, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwa r4,4(r10)
+ b .sys_shutdown
+
+_STATIC(do_sys_setsockopt) /* sys32_setsockopt(int, int, int, char *, int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwa r4,4(r10)
+ lwa r5,8(r10)
+ lwz r6,12(r10)
+ lwa r7,16(r10)
+ b .sys32_setsockopt
+
+_STATIC(do_sys_getsockopt) /* sys32_getsockopt(int, int, int, u32, u32) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwa r4,4(r10)
+ lwa r5,8(r10)
+ lwz r6,12(r10)
+ lwz r7,16(r10)
+ b .sys32_getsockopt
+
+_STATIC(do_sys_sendmsg) /* sys32_sendmsg(int, struct msghdr32 *, unsigned int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwa r5,8(r10)
+ b .sys32_sendmsg
+
+_STATIC(do_sys_recvmsg) /* sys32_recvmsg(int, struct msghdr32 *, unsigned int) */
+ mr r10,r4
+ lwa r3,0(r10)
+ lwz r4,4(r10)
+ lwa r5,8(r10)
+ b .sys32_recvmsg
+
+/* Common error exits used by the dispatcher and the fault handler. */
+_STATIC(do_einval)
+ li r3,-EINVAL
+ b .ret_from_syscall_1
+_STATIC(do_efault)
+ li r3,-EFAULT
+ b .ret_from_syscall_1
+
+ .data
+ .align 8
+/* Jump table indexed by (socketcall number - 1); order must match
+ * the SYS_* socketcall numbering (1=socket .. 17=recvmsg). */
+_GLOBAL(socketcall_table_begin)
+ .llong .do_sys_socket
+ .llong .do_sys_bind
+ .llong .do_sys_connect
+ .llong .do_sys_listen
+ .llong .do_sys_accept
+ .llong .do_sys_getsockname
+ .llong .do_sys_getpeername
+ .llong .do_sys_socketpair
+ .llong .do_sys_send
+ .llong .do_sys_recv
+ .llong .do_sys_sendto
+ .llong .do_sys_recvfrom
+ .llong .do_sys_shutdown
+ .llong .do_sys_setsockopt
+ .llong .do_sys_getsockopt
+ .llong .do_sys_sendmsg
+ .llong .do_sys_recvmsg
+_GLOBAL(socketcall_table_end)
+/* Exception table: faults while loading user args in the helpers above
+ * (PCs between table_begin and table_end markers) land in do_efault. */
+ .section __ex_table,"a"
+ .align 3
+ .llong .socketcall_table_begin
+ .llong 0
+ .llong .socketcall_table_end
+ .llong .do_efault
+ .previous
--- /dev/null
+/*
+ * sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 2001 IBM
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ptrace.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/utime.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/smb_fs.h>
+#include <linux/smb_mount.h>
+#include <linux/ncp_fs.h>
+#include <linux/quota.h>
+#include <linux/module.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/filter.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/sysctl.h>
+#include <linux/binfmts.h>
+
+#include <asm/types.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+
+#include <asm/semaphore.h>
+
+#include <net/scm.h>
+#include <linux/elf.h>
+#include <asm/ppcdebug.h>
+#include <asm/time.h>
+#include <asm/ppc32.h>
+
+extern unsigned long wall_jiffies;
+/* Microseconds per second, for time conversions below. */
+#define USEC_PER_SEC (1000000)
+
+/*
+ * These are the flags in the MSR that the user is allowed to change
+ * by modifying the saved value of the MSR on the stack. SE and BE
+ * should not be in this list since gdb may want to change these. I.e,
+ * you should be able to step out of a signal handler to see what
+ * instruction executes next after the signal handler completes.
+ * Alternately, if you stepped into a signal handler, you should be
+ * able to continue 'til the next breakpoint from within the signal
+ * handler, even if the handler returns.
+ */
+#define MSR_USERCHANGE (MSR_FE0 | MSR_FE1)
+
+/* In order to reduce some races, while at the same time doing additional
+ * checking and hopefully speeding things up, we copy filenames to the
+ * kernel data space before using them..
+ *
+ * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
+ */
+/* Returns 0 on success, -ENOENT for empty names, -ENAMETOOLONG if the
+ * name fills the whole page, or a negative copy error. */
+static inline int do_getname32(const char *filename, char *page)
+{
+ int retval;
+
+ /* 32bit pointer will be always far below TASK_SIZE :)) */
+ retval = strncpy_from_user((char *)page, (char *)filename, PAGE_SIZE);
+ if (retval > 0) {
+ if (retval < PAGE_SIZE)
+ return 0;
+ return -ENAMETOOLONG;
+ } else if (!retval)
+ retval = -ENOENT;
+ return retval;
+}
+
+/*
+ * 32-bit analogue of getname(): copy a user path into a kernel page.
+ * Returns the kernel buffer (free with putname) or an ERR_PTR.
+ */
+char * getname32(const char *filename)
+{
+ char *tmp, *result;
+
+ result = ERR_PTR(-ENOMEM);
+ tmp = __getname();
+ if (tmp) {
+ int retval = do_getname32(filename, tmp);
+
+ result = tmp;
+ if (retval < 0) {
+ putname(tmp);
+ result = ERR_PTR(retval);
+ }
+ }
+ return result;
+}
+
+
+
+extern asmlinkage long sys_utime(char * filename, struct utimbuf * times);
+
+/* 32-bit userland layout of struct utimbuf (32-bit time_t fields). */
+struct utimbuf32 {
+ __kernel_time_t32 actime, modtime;
+};
+
+/*
+ * 32-bit utime(2): widen the 32-bit utimbuf into the native struct and
+ * invoke sys_utime under KERNEL_DS with a kernel copy of the filename.
+ *
+ * Fix: '&times->actime' / '&times->modtime' had been corrupted by
+ * HTML-entity mangling of '&times' into the multiplication sign; restored.
+ */
+asmlinkage long sys32_utime(char * filename, struct utimbuf32 *times)
+{
+ struct utimbuf t;
+ mm_segment_t old_fs;
+ int ret;
+ char *filenam;
+
+ PPCDBG(PPCDBG_SYS32NI, "sys32_utime - running - filename=%s, times=%p - pid=%ld, comm=%s \n", filename, times, current->pid, current->comm);
+
+ if (!times)
+ return sys_utime(filename, NULL);
+ if (get_user(t.actime, &times->actime) || __get_user(t.modtime, &times->modtime))
+ return -EFAULT;
+ filenam = getname32(filename);
+
+ ret = PTR_ERR(filenam);
+ if (!IS_ERR(filenam)) {
+ /* Switch to KERNEL_DS so sys_utime accepts kernel pointers. */
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ ret = sys_utime(filenam, &t);
+ set_fs (old_fs);
+ putname (filenam);
+ }
+
+ return ret;
+}
+
+
+
+/* 32-bit iovec: iov_base is a 32-bit user pointer. */
+struct iovec32 { u32 iov_base; __kernel_size_t32 iov_len; };
+
+/* Signature shared by file_operations read/write methods. */
+typedef ssize_t (*IO_fn_t)(struct file *, char *, size_t, loff_t *);
+
+/*
+ * Common worker for 32-bit readv/writev: widen the iovec32 array into
+ * native iovecs, verify locks, then either hand the whole vector to the
+ * socket layer or loop calling the file's read/write per segment.
+ * type is VERIFY_WRITE for readv, VERIFY_READ for writev.
+ */
+static long do_readv_writev32(int type, struct file *file,
+			   const struct iovec32 *vector, u32 count)
+{
+ unsigned long tot_len;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov=iovstack, *ivp;
+ struct inode *inode;
+ long retval, i;
+ IO_fn_t fn;
+
+ /* First get the "struct iovec" from user memory and
+ * verify all the pointers
+ */
+ if (!count)
+ return 0;
+ if(verify_area(VERIFY_READ, vector, sizeof(struct iovec32)*count))
+ return -EFAULT;
+ if (count > UIO_MAXIOV)
+ return -EINVAL;
+ if (count > UIO_FASTIOV) {
+ iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL);
+ if (!iov)
+ return -ENOMEM;
+ }
+
+ /* NOTE(review): tot_len accumulation is not checked for overflow. */
+ tot_len = 0;
+ i = count;
+ ivp = iov;
+ while(i > 0) {
+ u32 len;
+ u32 buf;
+
+ __get_user(len, &vector->iov_len);
+ __get_user(buf, &vector->iov_base);
+ tot_len += len;
+ ivp->iov_base = (void *)A(buf);
+ ivp->iov_len = (__kernel_size_t) len;
+ vector++;
+ ivp++;
+ i--;
+ }
+
+ inode = file->f_dentry->d_inode;
+ /* VERIFY_WRITE actually means a read, as we write to user space */
+ retval = locks_verify_area((type == VERIFY_WRITE
+			     ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE),
+			    inode, file, file->f_pos, tot_len);
+ if (retval) {
+ if (iov != iovstack)
+ kfree(iov);
+ return retval;
+ }
+
+ /* Then do the actual IO. Note that sockets need to be handled
+ * specially as they have atomicity guarantees and can handle
+ * iovec's natively
+ */
+ if (inode->i_sock) {
+ int err;
+ err = sock_readv_writev(type, inode, file, iov, count, tot_len);
+ if (iov != iovstack)
+ kfree(iov);
+ return err;
+ }
+
+ if (!file->f_op) {
+ if (iov != iovstack)
+ kfree(iov);
+ return -EINVAL;
+ }
+ /* VERIFY_WRITE actually means a read, as we write to user space */
+ fn = file->f_op->read;
+ if (type == VERIFY_READ)
+ fn = (IO_fn_t) file->f_op->write;
+ ivp = iov;
+ while (count > 0) {
+ void * base;
+ int len, nr;
+
+ base = ivp->iov_base;
+ len = ivp->iov_len;
+ ivp++;
+ count--;
+ nr = fn(file, base, len, &file->f_pos);
+ if (nr < 0) {
+ /* Partial progress wins over a later error. */
+ if (retval)
+ break;
+ retval = nr;
+ break;
+ }
+ retval += nr;
+ /* Short read/write: stop the vector loop. */
+ if (nr != len)
+ break;
+ }
+ if (iov != iovstack)
+ kfree(iov);
+ return retval;
+}
+
+/* 32-bit readv(2): validate the fd/mode, then defer to the common worker. */
+asmlinkage long sys32_readv(u32 fd, struct iovec32 *vector, u32 count)
+{
+ struct file *file;
+ long ret = -EBADF;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_readv - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ file = fget(fd);
+ if(!file)
+ goto bad_file;
+
+ if (file->f_op && (file->f_mode & FMODE_READ) &&
+ (file->f_op->readv || file->f_op->read))
+ ret = do_readv_writev32(VERIFY_WRITE, file, vector, count);
+ fput(file);
+
+bad_file:
+ PPCDBG(PPCDBG_SYS32, "sys32_readv - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+ return ret;
+}
+
+/* 32-bit writev(2): validate the fd/mode, then defer to the common worker. */
+asmlinkage long sys32_writev(u32 fd, struct iovec32 *vector, u32 count)
+{
+ struct file *file;
+ int ret = -EBADF;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_writev - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ file = fget(fd);
+ if(!file)
+ goto bad_file;
+ if (file->f_op && (file->f_mode & FMODE_WRITE) &&
+ (file->f_op->writev || file->f_op->write))
+ ret = do_readv_writev32(VERIFY_READ, file, vector, count);
+ fput(file);
+
+bad_file:
+ PPCDBG(PPCDBG_SYS32, "sys32_writev - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+ return ret;
+}
+
+
+
+/* Copy a userspace flock32 into a native flock; nonzero on fault. */
+static inline int get_flock(struct flock *kfl, struct flock32 *ufl)
+{
+ int err;
+
+ err = get_user(kfl->l_type, &ufl->l_type);
+ err |= __get_user(kfl->l_whence, &ufl->l_whence);
+ err |= __get_user(kfl->l_start, &ufl->l_start);
+ err |= __get_user(kfl->l_len, &ufl->l_len);
+ err |= __get_user(kfl->l_pid, &ufl->l_pid);
+ return err;
+}
+
+/* Copy a native flock back out to a userspace flock32; nonzero on fault. */
+static inline int put_flock(struct flock *kfl, struct flock32 *ufl)
+{
+ int err;
+
+ err = __put_user(kfl->l_type, &ufl->l_type);
+ err |= __put_user(kfl->l_whence, &ufl->l_whence);
+ err |= __put_user(kfl->l_start, &ufl->l_start);
+ err |= __put_user(kfl->l_len, &ufl->l_len);
+ err |= __put_user(kfl->l_pid, &ufl->l_pid);
+ return err;
+}
+
+extern asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
+/*
+ * 32-bit fcntl(2): the lock commands need flock32 <-> flock conversion
+ * (done under KERNEL_DS); every other command passes straight through.
+ */
+asmlinkage long sys32_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case F_GETLK:
+ case F_SETLK:
+ case F_SETLKW:
+ {
+ struct flock f;
+ mm_segment_t old_fs;
+ long ret;
+
+ if(get_flock(&f, (struct flock32 *)arg))
+ return -EFAULT;
+ old_fs = get_fs(); set_fs (KERNEL_DS);
+ ret = sys_fcntl(fd, cmd, (unsigned long)&f);
+ set_fs (old_fs);
+ if(put_flock(&f, (struct flock32 *)arg))
+ return -EFAULT;
+ return ret;
+ }
+ default:
+ return sys_fcntl(fd, cmd, (unsigned long)arg);
+ }
+}
+
+/* 32-bit userland layout of struct ncp_mount_data (narrow uid/gid/mode/pid). */
+struct ncp_mount_data32 {
+ int version;
+ unsigned int ncp_fd;
+ __kernel_uid_t32 mounted_uid;
+ __kernel_pid_t32 wdog_pid;
+ unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
+ unsigned int time_out;
+ unsigned int retry_count;
+ unsigned int flags;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_mode_t32 file_mode;
+ __kernel_mode_t32 dir_mode;
+};
+
+/*
+ * Widen a 32-bit ncp_mount_data in place to the native layout.
+ * Fields are converted back-to-front so narrow source fields are read
+ * before the wider destination fields overwrite them in the same buffer.
+ */
+static void *do_ncp_super_data_conv(void *raw_data)
+{
+ struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
+ struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;
+
+ n->dir_mode = n32->dir_mode;
+ n->file_mode = n32->file_mode;
+ n->gid = n32->gid;
+ n->uid = n32->uid;
+ /* Moves mounted_vol plus time_out/retry_count/flags in one shot. */
+ memmove (n->mounted_vol, n32->mounted_vol, (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
+ n->wdog_pid = n32->wdog_pid;
+ n->mounted_uid = n32->mounted_uid;
+ return raw_data;
+}
+
+struct smb_mount_data32 {
+ int version;
+ __kernel_uid_t32 mounted_uid;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_mode_t32 file_mode;
+ __kernel_mode_t32 dir_mode;
+};
+
+static void *do_smb_super_data_conv(void *raw_data)
+{
+ struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
+ struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;
+
+ s->version = s32->version;
+ s->mounted_uid = s32->mounted_uid;
+ s->uid = s32->uid;
+ s->gid = s32->gid;
+ s->file_mode = s32->file_mode;
+ s->dir_mode = s32->dir_mode;
+ return raw_data;
+}
+
+/*
+ * Copy a user-supplied mount argument block into a freshly allocated
+ * kernel page.  At most PAGE_SIZE-1 bytes are copied, bounded by the
+ * end of the VMA containing the pointer.  On success *kernel holds
+ * the page address (0 when user was NULL) and the caller must
+ * free_page() it; returns 0, -EFAULT or -ENOMEM.
+ *
+ * NOTE(review): the page is not zeroed and no NUL terminator is
+ * appended, yet callers run strcmp() on the result; a terminator is
+ * only present when the user's own string (including its NUL) fits in
+ * the copied span -- confirm this matches the historic behaviour.
+ * NOTE(review): find_vma() is called without mmap_sem visibly held
+ * here -- confirm the calling context makes that safe.
+ */
+static int copy_mount_stuff_to_kernel(const void *user, unsigned long *kernel)
+{
+ int i;
+ unsigned long page;
+ struct vm_area_struct *vma;
+
+ *kernel = 0;
+ if(!user)
+ return 0;
+ vma = find_vma(current->mm, (unsigned long)user);
+ if(!vma || (unsigned long)user < vma->vm_start)
+ return -EFAULT;
+ if(!(vma->vm_flags & VM_READ))
+ return -EFAULT;
+ /* Clamp the copy to the readable remainder of the VMA, and to one
+ * page less a byte. */
+ i = vma->vm_end - (unsigned long) user;
+ if(PAGE_SIZE <= (unsigned long) i)
+ i = PAGE_SIZE - 1;
+ if(!(page = __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ if(copy_from_user((void *) page, user, i)) {
+ free_page(page);
+ return -EFAULT;
+ }
+ *kernel = page;
+ return 0;
+}
+
+#define SMBFS_NAME "smbfs"
+#define NCPFS_NAME "ncpfs"
+
+/*
+ * 32-bit mount(2).  All four string/blob arguments are staged into
+ * kernel pages via copy_mount_stuff_to_kernel(); for smbfs and ncpfs
+ * the per-filesystem mount data block additionally gets its 32-bit
+ * layout converted in place before do_mount() is called.
+ *
+ * NOTE(review): when data is NULL, data_page stays 0, yet the
+ * smb/ncp branch would hand that NULL page to the converter --
+ * confirm user space can never reach that combination.
+ */
+asmlinkage long sys32_mount(char *dev_name, char *dir_name, char *type, unsigned long new_flags, u32 data)
+{
+ unsigned long type_page = 0;
+ unsigned long data_page = 0;
+ unsigned long dev_page = 0;
+ unsigned long dir_page = 0;
+ int err, is_smb, is_ncp;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_mount - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ is_smb = is_ncp = 0;
+
+ /* The filesystem type is needed first to decide whether the data
+ * block requires 32->64-bit conversion. */
+ err = copy_mount_stuff_to_kernel((const void *)type, &type_page);
+ if (err)
+ goto out;
+
+ if (!type_page) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ is_smb = !strcmp((char *)type_page, SMBFS_NAME);
+ is_ncp = !strcmp((char *)type_page, NCPFS_NAME);
+
+ err = copy_mount_stuff_to_kernel((const void *)AA(data), &data_page);
+ if (err)
+ goto type_out;
+
+ err = copy_mount_stuff_to_kernel(dev_name, &dev_page);
+ if (err)
+ goto data_out;
+
+ err = copy_mount_stuff_to_kernel(dir_name, &dir_page);
+ if (err)
+ goto dev_out;
+
+ if (!is_smb && !is_ncp) {
+ lock_kernel();
+ err = do_mount((char*)dev_page, (char*)dir_page,
+ (char*)type_page, new_flags, (char*)data_page);
+ unlock_kernel();
+ } else {
+ /* smbfs/ncpfs: rewrite the data block from its 32-bit user
+ * layout to the 64-bit kernel layout before mounting. */
+ if (is_ncp)
+ do_ncp_super_data_conv((void *)data_page);
+ else
+ do_smb_super_data_conv((void *)data_page);
+
+ lock_kernel();
+ err = do_mount((char*)dev_page, (char*)dir_page,
+ (char*)type_page, new_flags, (char*)data_page);
+ unlock_kernel();
+ }
+ /* Unwind the staging pages in reverse order of allocation. */
+ free_page(dir_page);
+
+dev_out:
+ free_page(dev_page);
+
+data_out:
+ free_page(data_page);
+
+type_out:
+ free_page(type_page);
+
+out:
+
+ PPCDBG(PPCDBG_SYS32, "sys32_mount - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ return err;
+}
+
+/* 32-bit layout of struct dqblk: identical except that the two
+ * timestamps are 32-bit __kernel_time_t32. */
+struct dqblk32 {
+ __u32 dqb_bhardlimit;
+ __u32 dqb_bsoftlimit;
+ __u32 dqb_curblocks;
+ __u32 dqb_ihardlimit;
+ __u32 dqb_isoftlimit;
+ __u32 dqb_curinodes;
+ __kernel_time_t32 dqb_btime;
+ __kernel_time_t32 dqb_itime;
+};
+
+
+extern asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr);
+
+/* Note: it is necessary to treat cmd and id as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/*
+ * 32-bit quotactl(2).  The GET/SET subcommands exchange a struct
+ * dqblk whose two time fields are narrower in the 32-bit ABI; the
+ * 32-bit image is copied into d and the time fields are then widened
+ * (or narrowed on the way out) by aliasing the dqblk32 layout over
+ * the same buffer.  sys_quotactl runs under set_fs(KERNEL_DS) so it
+ * accepts the kernel-resident &d.
+ *
+ * NOTE(review): in the SET path, d.dqb_itime is assigned before
+ * dqb_btime is read back through the aliased 32-bit layout; that is
+ * only safe if the widened store does not overlap the yet-unread
+ * 32-bit field -- verify the two struct layouts.
+ */
+asmlinkage long sys32_quotactl(u32 cmd_parm, const char *special, u32 id_parm, unsigned long addr)
+{
+ int cmd = (int)cmd_parm;
+ int id = (int)id_parm;
+ int cmds = cmd >> SUBCMDSHIFT;
+ int err;
+ struct dqblk d;
+ mm_segment_t old_fs;
+ char *spec;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_quotactl - entered - pid=%ld current=%lx comm=%s \n",
+ current->pid, current, current->comm);
+
+ switch (cmds) {
+ case Q_GETQUOTA:
+ break;
+ case Q_SETQUOTA:
+ case Q_SETUSE:
+ case Q_SETQLIM:
+ if (copy_from_user (&d, (struct dqblk32 *)addr,
+ sizeof (struct dqblk32)))
+ return -EFAULT;
+ /* Widen the 32-bit timestamps in place. */
+ d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime;
+ d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime;
+ break;
+ default:
+ return sys_quotactl(cmd, special,
+ id, (caddr_t)addr);
+ }
+ spec = getname32 (special);
+ err = PTR_ERR(spec);
+ if (IS_ERR(spec)) return err;
+ old_fs = get_fs ();
+ set_fs (KERNEL_DS);
+ err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d);
+ set_fs (old_fs);
+ putname (spec);
+ if (cmds == Q_GETQUOTA) {
+ /* Narrow the timestamps back into the 32-bit layout; the
+ * temporaries are read first so the in-place stores cannot
+ * clobber them. */
+ __kernel_time_t b = d.dqb_btime, i = d.dqb_itime;
+ ((struct dqblk32 *)&d)->dqb_itime = i;
+ ((struct dqblk32 *)&d)->dqb_btime = b;
+ if (copy_to_user ((struct dqblk32 *)addr, &d,
+ sizeof (struct dqblk32)))
+ return -EFAULT;
+ }
+
+ PPCDBG(PPCDBG_SYS32, "sys32_quotactl - exited - pid=%ld current=%lx comm=%s \n",
+ current->pid, current, current->comm);
+
+ return err;
+}
+
+
+
+/* readdir & getdents */
+/* NAME_OFFSET computes the byte offset of d_name within a dirent via
+ * pointer arithmetic; ROUND_UP aligns record lengths to 4 bytes. */
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
+
+/* 32-bit layout of the legacy (one-entry) readdir record. */
+struct old_linux_dirent32 {
+ u32 d_ino;
+ u32 d_offset;
+ unsigned short d_namlen;
+ /* unsigned char d_type; */
+ char d_name[1];
+};
+
+/* State threaded through vfs_readdir for the legacy readdir path:
+ * the single user record to fill, and a count guarding that exactly
+ * one entry is emitted. */
+struct readdir_callback32 {
+ struct old_linux_dirent32 * dirent;
+ int count;
+};
+
+/*
+ * filldir callback for old32_readdir: copy exactly one directory
+ * entry into the user's old_linux_dirent32 and refuse any further
+ * entries with -EINVAL.
+ * NOTE(review): the put_user/copy_to_user return values are ignored,
+ * so a faulting user buffer goes unreported -- matches the original
+ * code, but worth confirming intent.
+ */
+static int fillonedir(void * __buf, const char * name, int namlen,
+ off_t offset, ino_t ino, unsigned int d_type)
+{
+ struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
+ struct old_linux_dirent32 * dirent;
+
+ if (buf->count)
+ return -EINVAL;
+ buf->count++;
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(offset, &dirent->d_offset);
+ put_user(namlen, &dirent->d_namlen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ return 0;
+}
+
+/*
+ * Legacy 32-bit readdir(2): return at most one directory entry per
+ * call.  On success returns buf.count (1 if an entry was emitted,
+ * 0 at end of directory); -EBADF for a bad descriptor, or the error
+ * from vfs_readdir.
+ */
+asmlinkage int old32_readdir(unsigned int fd, struct old_linux_dirent32 *dirent, unsigned int count)
+{
+ int error = -EBADF;
+ struct file * file;
+ struct readdir_callback32 buf;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.count = 0;
+ buf.dirent = dirent;
+
+ error = vfs_readdir(file, (filldir_t)fillonedir, &buf);
+ if (error < 0)
+ goto out_putf;
+ error = buf.count;
+
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+/* 32-bit getdents record.  The #if 0 variant is the classic
+ * flexible-tail declaration (d_name[1]); the active variant pads
+ * d_name to 256 bytes -- only the declared size differs, the wire
+ * layout written to user space is governed by d_reclen. */
+#if 0
+struct linux_dirent32 {
+ u32 d_ino;
+ u32 d_off;
+ unsigned short d_reclen;
+ char d_name[1];
+};
+#else
+struct linux_dirent32 {
+ u32 d_ino;
+ u32 d_off;
+ unsigned short d_reclen;
+ /* unsigned char d_type; */
+ char d_name[256];
+};
+#endif
+
+/* State threaded through vfs_readdir for sys32_getdents: the write
+ * cursor into the user buffer, the previously written record (whose
+ * d_off gets back-patched), remaining space, and a sticky error. */
+struct getdents_callback32 {
+ struct linux_dirent32 * current_dir;
+ struct linux_dirent32 * previous;
+ int count;
+ int error;
+};
+
+/*
+ * filldir callback for sys32_getdents: append one 32-bit
+ * linux_dirent32 record to the user buffer described by __buf.
+ * Returns 0 on success, or -EINVAL (also latched in buf->error) when
+ * the record no longer fits in the remaining space.
+ *
+ * NOTE(review): put_user/copy_to_user return values are ignored here,
+ * as in the original; a faulting user buffer is not reported.
+ */
+static int
+filldir(void * __buf, const char * name, int namlen, off_t offset, ino_t ino,
+ unsigned int d_type)
+{
+ struct linux_dirent32 * dirent;
+ struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
+ /* NAME_OFFSET performs only pointer arithmetic to find d_name's
+ * offset; its argument's value is never dereferenced. */
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ /* Back-patch the previous record's d_off now that another record
+ * definitely follows it. */
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->current_dir;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(reclen, &dirent->d_reclen);
+ /* put_user(d_type, &dirent->d_type); */
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ /* Advance the cursor past this record.  The original used the GCC
+ * cast-as-lvalue extension "((char *) dirent) += reclen;", which is
+ * not valid standard C and was removed from later compilers; this
+ * form is semantically identical. */
+ dirent = (struct linux_dirent32 *)((char *) dirent + reclen);
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+/*
+ * 32-bit getdents(2): fill the user buffer with as many 32-bit
+ * dirent records as fit.  Returns the number of bytes written, 0 at
+ * end of directory, -EBADF for a bad fd, or the latched callback
+ * error (e.g. -EINVAL when even one record cannot fit).
+ */
+asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 *dirent, unsigned int count)
+{
+ struct file * file;
+ struct linux_dirent32 * lastdirent;
+ struct getdents_callback32 buf;
+ int error = -EBADF;
+
+ PPCDBG(PPCDBG_SYS32NI, "sys32_getdents - running - fd=%x, pid=%ld, comm=%s \n", fd, current->pid, current->comm);
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, (filldir_t)filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+ lastdirent = buf.previous;
+ error = buf.error;
+ if(lastdirent) {
+ /* Seal the final record's d_off with the current file
+ * position and report the bytes consumed. */
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = count - buf.count;
+ }
+ out_putf:
+ fput(file);
+
+ out:
+ return error;
+}
+/* end of readdir & getdents */
+
+
+
+/* 32-bit timeval and related flotsam. */
+
+/* 32-bit ABI timeval: both fields are 32-bit ints rather than longs. */
+struct timeval32
+{
+ int tv_sec, tv_usec;
+};
+
+/* 32-bit ABI itimerval, built from two timeval32 values. */
+struct itimerval32
+{
+ struct timeval32 it_interval;
+ struct timeval32 it_value;
+};
+
+
+
+
+/*
+ * Ooo, nasty. We need here to frob 32-bit unsigned longs to
+ * 64-bit unsigned longs.
+ */
+/*
+ * Read n 32-bit fd_set words from user space and pack each pair into
+ * one 64-bit kernel word (low word first -- little-endian packing of
+ * the bitmap).  When ufdset is NULL the kernel set is zeroed instead,
+ * rounded up to a full 64-bit word.  Returns 0 or -EFAULT.
+ * NOTE(review): the verify_area check uses VERIFY_WRITE although this
+ * routine only reads -- presumably so the matching set_fd_set32()
+ * write-back is pre-validated; confirm.
+ */
+static inline int
+get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
+{
+ if (ufdset) {
+ unsigned long odd;
+
+ if (verify_area(VERIFY_WRITE, ufdset, n*sizeof(u32)))
+ return -EFAULT;
+
+ /* Process word pairs; a trailing odd word is handled last. */
+ odd = n & 1UL;
+ n &= ~1UL;
+ while (n) {
+ unsigned long h, l;
+ __get_user(l, ufdset);
+ __get_user(h, ufdset+1);
+ ufdset += 2;
+ *fdset++ = h << 32 | l;
+ n -= 2;
+ }
+ if (odd)
+ __get_user(*fdset, ufdset);
+ } else {
+ /* Tricky, must clear full unsigned long in the
+ * kernel fdset at the end, this makes sure that
+ * actually happens.
+ */
+ memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
+ }
+ return 0;
+}
+
+/*
+ * Inverse of get_fd_set32(): split each 64-bit kernel fd_set word
+ * back into two 32-bit user words (low word first).  No-op when
+ * ufdset is NULL.  Faults are silently ignored here; the area was
+ * validated by get_fd_set32() earlier in sys32_select.
+ */
+static inline void
+set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
+{
+ unsigned long odd;
+
+ if (!ufdset)
+ return;
+
+ odd = n & 1UL;
+ n &= ~1UL;
+ while (n) {
+ unsigned long h, l;
+ l = *fdset++;
+ h = l >> 32;
+ __put_user(l, ufdset);
+ __put_user(h, ufdset+1);
+ ufdset += 2;
+ n -= 2;
+ }
+ if (odd)
+ __put_user(*fdset, ufdset);
+}
+
+
+
+#define MAX_SELECT_SECONDS ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
+
+/*
+ * 32-bit select(2).  Converts the 32-bit timeval to a jiffies
+ * timeout, repacks the three 32-bit fd_sets into 64-bit kernel
+ * bitmaps, runs do_select(), then writes back the result sets and
+ * (unless STICKY_TIMEOUTS) the remaining time.
+ */
+asmlinkage long sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
+{
+ fd_set_bits fds;
+ struct timeval32 *tvp = (struct timeval32 *)AA(tvp_x);
+ char *bits;
+ unsigned long nn;
+ long timeout;
+ int ret, size;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_select - entered - n=%x, inp=%p, outp=%p - pid=%ld comm=%s \n", n, inp, outp, current->pid, current->comm);
+
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (tvp) {
+ time_t sec, usec;
+ if ((ret = verify_area(VERIFY_READ, tvp, sizeof(*tvp)))
+ || (ret = __get_user(sec, &tvp->tv_sec))
+ || (ret = __get_user(usec, &tvp->tv_usec)))
+ goto out_nofds;
+
+ ret = -EINVAL;
+ if(sec < 0 || usec < 0)
+ goto out_nofds;
+
+ /* Convert to jiffies, rounding microseconds up; an oversized
+ * timeout keeps the infinite MAX_SCHEDULE_TIMEOUT. */
+ if ((unsigned long) sec < MAX_SELECT_SECONDS) {
+ timeout = (usec + 1000000/HZ - 1) / (1000000/HZ);
+ timeout += sec * (unsigned long) HZ;
+ }
+ }
+
+ ret = -EINVAL;
+ if (n < 0)
+ goto out_nofds;
+ if (n > current->files->max_fdset)
+ n = current->files->max_fdset;
+
+ /*
+ * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
+ * since we used fdset we need to allocate memory in units of
+ * long-words.
+ */
+ ret = -ENOMEM;
+ size = FDS_BYTES(n);
+ bits = kmalloc(6 * size, GFP_KERNEL);
+ if (!bits)
+ goto out_nofds;
+ fds.in = (unsigned long *) bits;
+ fds.out = (unsigned long *) (bits + size);
+ fds.ex = (unsigned long *) (bits + 2*size);
+ fds.res_in = (unsigned long *) (bits + 3*size);
+ fds.res_out = (unsigned long *) (bits + 4*size);
+ fds.res_ex = (unsigned long *) (bits + 5*size);
+
+ /* nn = number of 32-bit words needed to cover n descriptors. */
+ nn = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
+ if ((ret = get_fd_set32(nn, fds.in, inp)) ||
+ (ret = get_fd_set32(nn, fds.out, outp)) ||
+ (ret = get_fd_set32(nn, fds.ex, exp)))
+ goto out;
+ zero_fd_set(n, fds.res_in);
+ zero_fd_set(n, fds.res_out);
+ zero_fd_set(n, fds.res_ex);
+
+ ret = do_select(n, &fds, &timeout);
+
+ /* Report the unslept time back, unless the personality asks for
+ * BSD-style sticky timeouts. */
+ if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
+ time_t sec = 0, usec = 0;
+ if (timeout) {
+ sec = timeout / HZ;
+ usec = timeout % HZ;
+ usec *= (1000000/HZ);
+ }
+ put_user(sec, &tvp->tv_sec);
+ put_user(usec, &tvp->tv_usec);
+ }
+
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ /* Timed out with nothing ready: restart transparently unless
+ * a signal is pending. */
+ ret = -ERESTARTNOHAND;
+ if (signal_pending(current))
+ goto out;
+ ret = 0;
+ }
+
+ set_fd_set32(nn, inp, fds.res_in);
+ set_fd_set32(nn, outp, fds.res_out);
+ set_fd_set32(nn, exp, fds.res_ex);
+
+out:
+ kfree(bits);
+
+out_nofds:
+ PPCDBG(PPCDBG_SYS32X, "sys32_select - exited - pid=%ld, comm=%s \n", current->pid, current->comm);
+ return ret;
+}
+
+
+
+
+/*
+ * Due to some executables calling the wrong select we sometimes
+ * get wrong args. This determines how the args are being passed
+ * (a single ptr to them all args passed) then calls
+ * sys_select() with the appropriate args. -- Cort
+ */
+/* Note: it is necessary to treat n as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+asmlinkage int ppc32_select(u32 n, u32* inp, u32* outp, u32* exp, u32 tvp_x)
+{
+ /* A descriptor count of 4096 or more indicates the old single-
+ * pointer calling convention was used; this path treats it as
+ * unrecoverable.  NOTE(review): panicking the whole machine on a
+ * bad user argument is drastic -- confirm this matches the intent
+ * of the comment above. */
+ if ((unsigned int)n >= 4096)
+ panic("ppc32_select - wrong arguments were passed in \n");
+
+ return sys32_select((int)n, inp, outp, exp, tvp_x);
+}
+
+
+
+/*
+ * Fill a user-space 32-bit struct stat32 from an inode.  Fields wider
+ * than 32 bits are truncated by the put_user stores.  When the
+ * filesystem reports no blocksize, a block count is synthesized from
+ * the file size assuming a classic ext2-style indirect-block layout.
+ * Returns non-zero if any user store faulted.
+ */
+static int cp_new_stat32(struct inode *inode, struct stat32 *statbuf)
+{
+ unsigned long ino, blksize, blocks;
+ kdev_t dev, rdev;
+ umode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ off_t size;
+ time_t atime, mtime, ctime;
+ int err;
+
+ /* Stream the loads of inode data into the load buffer,
+ * then we push it all into the store buffer below. This
+ * should give optimal cache performance.
+ */
+ ino = inode->i_ino;
+ dev = inode->i_dev;
+ mode = inode->i_mode;
+ nlink = inode->i_nlink;
+ uid = inode->i_uid;
+ gid = inode->i_gid;
+ rdev = inode->i_rdev;
+ size = inode->i_size;
+ atime = inode->i_atime;
+ mtime = inode->i_mtime;
+ ctime = inode->i_ctime;
+ blksize = inode->i_blksize;
+ blocks = inode->i_blocks;
+
+ err = put_user(kdev_t_to_nr(dev), &statbuf->st_dev);
+ err |= put_user(ino, &statbuf->st_ino);
+ err |= put_user(mode, &statbuf->st_mode);
+ err |= put_user(nlink, &statbuf->st_nlink);
+ err |= put_user(uid, &statbuf->st_uid);
+ err |= put_user(gid, &statbuf->st_gid);
+ err |= put_user(kdev_t_to_nr(rdev), &statbuf->st_rdev);
+ err |= put_user(size, &statbuf->st_size);
+ err |= put_user(atime, &statbuf->st_atime);
+ err |= put_user(0, &statbuf->__unused1);
+ err |= put_user(mtime, &statbuf->st_mtime);
+ err |= put_user(0, &statbuf->__unused2);
+ err |= put_user(ctime, &statbuf->st_ctime);
+ err |= put_user(0, &statbuf->__unused3);
+ if (blksize) {
+ err |= put_user(blksize, &statbuf->st_blksize);
+ err |= put_user(blocks, &statbuf->st_blocks);
+ } else {
+ unsigned int tmp_blocks;
+
+ /* D_B: direct blocks per inode; I_B: pointers per indirect
+ * block.  Used to estimate metadata overhead below. */
+#define D_B 7
+#define I_B (BLOCK_SIZE / sizeof(unsigned short))
+ tmp_blocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ if (tmp_blocks > D_B) {
+ unsigned int indirect;
+
+ indirect = (tmp_blocks - D_B + I_B - 1) / I_B;
+ tmp_blocks += indirect;
+ if (indirect > 1) {
+ indirect = (indirect - 1 + I_B - 1) / I_B;
+ tmp_blocks += indirect;
+ if (indirect > 1)
+ tmp_blocks++;
+ }
+ }
+ err |= put_user(BLOCK_SIZE, &statbuf->st_blksize);
+ err |= put_user((BLOCK_SIZE / 512) * tmp_blocks, &statbuf->st_blocks);
+#undef D_B
+#undef I_B
+ }
+ err |= put_user(0, &statbuf->__unused4[0]);
+ err |= put_user(0, &statbuf->__unused4[1]);
+
+ return err;
+}
+
+/* Ask the filesystem to refresh the inode's attributes, if it
+ * provides a revalidate hook; filesystems without one implicitly
+ * succeed. */
+static __inline__ int
+do_revalidate(struct dentry *dentry)
+{
+ struct inode *ino = dentry->d_inode;
+
+ if (ino->i_op == NULL || ino->i_op->revalidate == NULL)
+ return 0;
+ return ino->i_op->revalidate(dentry);
+}
+
+/*
+ * 32-bit stat(2): path walk (following symlinks), revalidate, then
+ * copy the inode into the user's 32-bit stat buffer.
+ */
+asmlinkage long sys32_newstat(char* filename, struct stat32* statbuf)
+{
+ struct nameidata nd;
+ int error;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_newstat - running - filename=%s, statbuf=%p, pid=%ld, comm=%s\n", filename, statbuf, current->pid, current->comm);
+
+ error = user_path_walk(filename, &nd);
+ if (!error) {
+ error = do_revalidate(nd.dentry);
+ if (!error)
+ error = cp_new_stat32(nd.dentry->d_inode, statbuf);
+ path_release(&nd);
+ }
+ return error;
+}
+
+/*
+ * 32-bit lstat(2): identical to sys32_newstat except the final
+ * symlink is not followed (user_path_walk_link).
+ */
+asmlinkage long sys32_newlstat(char * filename, struct stat32 *statbuf)
+{
+ struct nameidata nd;
+ int error;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_newlstat - running - fn=%s, pid=%ld, comm=%s\n", filename, current->pid, current->comm);
+
+ error = user_path_walk_link(filename, &nd);
+ if (!error) {
+ error = do_revalidate(nd.dentry);
+ if (!error)
+ error = cp_new_stat32(nd.dentry->d_inode, statbuf);
+
+ path_release(&nd);
+ }
+ return error;
+}
+
+/*
+ * 32-bit fstat(2): resolve the descriptor, revalidate, and copy the
+ * inode into the user's 32-bit stat buffer.
+ */
+asmlinkage long sys32_newfstat(unsigned int fd, struct stat32 *statbuf)
+{
+ struct file *f;
+ int err = -EBADF;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_newfstat - running - fd=%x, pid=%ld, comm=%s\n", fd, current->pid, current->comm);
+
+ f = fget(fd);
+ if (f) {
+ struct dentry * dentry = f->f_dentry;
+
+ err = do_revalidate(dentry);
+ if (!err)
+ err = cp_new_stat32(dentry->d_inode, statbuf);
+ fput(f);
+ }
+ return err;
+}
+
+/*
+ * Narrow a kernel struct statfs into the user's 32-bit statfs32.
+ * The first put_user performs the access check; the rest use
+ * __put_user on the same structure.  Returns non-zero on fault.
+ */
+static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
+{
+ int err;
+
+ err = put_user (kbuf->f_type, &ubuf->f_type);
+ err |= __put_user (kbuf->f_bsize, &ubuf->f_bsize);
+ err |= __put_user (kbuf->f_blocks, &ubuf->f_blocks);
+ err |= __put_user (kbuf->f_bfree, &ubuf->f_bfree);
+ err |= __put_user (kbuf->f_bavail, &ubuf->f_bavail);
+ err |= __put_user (kbuf->f_files, &ubuf->f_files);
+ err |= __put_user (kbuf->f_ffree, &ubuf->f_ffree);
+ err |= __put_user (kbuf->f_namelen, &ubuf->f_namelen);
+ err |= __put_user (kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]);
+ err |= __put_user (kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]);
+ return err;
+}
+
+extern asmlinkage int sys_statfs(const char * path, struct statfs * buf);
+
+/*
+ * 32-bit statfs(2): copy the path in, call sys_statfs on a kernel
+ * struct statfs under set_fs(KERNEL_DS), then narrow the result into
+ * the user's statfs32.
+ * NOTE(review): put_statfs runs even when sys_statfs failed, copying
+ * an uninitialized struct out and possibly replacing the real error
+ * with -EFAULT -- confirm whether that is intended.
+ */
+asmlinkage long sys32_statfs(const char * path, struct statfs32 *buf)
+{
+ int ret;
+ struct statfs s;
+ mm_segment_t old_fs = get_fs();
+ char *pth;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_statfs - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+ pth = getname32 (path);
+ ret = PTR_ERR(pth);
+ if (!IS_ERR(pth)) {
+ set_fs (KERNEL_DS);
+ ret = sys_statfs((const char *)pth, &s);
+ set_fs (old_fs);
+ putname (pth);
+ if (put_statfs(buf, &s))
+ return -EFAULT;
+ }
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_statfs - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ return ret;
+}
+
+extern asmlinkage long sys_fstatfs(unsigned int fd, struct statfs * buf);
+
+/*
+ * 32-bit fstatfs(2): same KERNEL_DS staging as sys32_statfs, keyed by
+ * descriptor instead of path.
+ * NOTE(review): as above, put_statfs runs regardless of whether
+ * sys_fstatfs succeeded -- confirm intent.
+ */
+asmlinkage long sys32_fstatfs(unsigned int fd, struct statfs32 *buf)
+{
+ int ret;
+ struct statfs s;
+ mm_segment_t old_fs = get_fs();
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_fstatfs - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+ set_fs (KERNEL_DS);
+ ret = sys_fstatfs(fd, &s);
+ set_fs (old_fs);
+ if (put_statfs(buf, &s))
+ return -EFAULT;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_fstatfs - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+ return ret;
+}
+
+
+
+extern asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2);
+
+/* 32-bit entry point for sysfs(2).  The option argument arrives as an
+ * unsigned 32-bit register value; casting it back to a signed int
+ * guarantees correct sign extension before the 64-bit implementation
+ * sees it.  The remaining arguments widen implicitly.
+ */
+asmlinkage long sys32_sysfs(u32 option, u32 arg1, u32 arg2)
+{
+ long rc;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_sysfs - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+ rc = sys_sysfs((int)option, arg1, arg2);
+ return rc;
+}
+
+
+
+
+extern unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+
+/*
+ * 32-bit mremap(2): reject any address or length that would let the
+ * mapping cross the 32-bit task's 0xf0000000 address-space ceiling,
+ * then delegate to do_mremap() under mmap_sem.  When the caller did
+ * not pass MREMAP_FIXED but the region cannot grow in place below
+ * the ceiling, a new area is picked (if MREMAP_MAYMOVE allows).
+ *
+ * Fix: the two mmap_sem lines had been corrupted by character-set
+ * mangling -- "&current" was rendered as the HTML entity sequence
+ * "¤t" ("&curren;" + "t") and did not compile.  Restored to
+ * &current->mm->mmap_sem.
+ */
+asmlinkage unsigned long sys32_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len,
+ unsigned long flags, u32 __new_addr)
+{
+ unsigned long ret = -EINVAL;
+ unsigned long new_addr = AA(__new_addr);
+
+ PPCDBG(PPCDBG_SYS32, "sys32_mremap - entered - pid=%ld current=%lx comm=%s\n",
+ current->pid, current, current->comm);
+
+
+ if (old_len > 0xf0000000UL || new_len > 0xf0000000UL)
+ goto out;
+ if (addr > 0xf0000000UL - old_len)
+ goto out;
+ down_write(&current->mm->mmap_sem);
+ if (flags & MREMAP_FIXED) {
+ if (new_addr > 0xf0000000UL - new_len)
+ goto out_sem;
+ } else if (addr > 0xf0000000UL - new_len) {
+ /* Cannot grow in place below the 32-bit ceiling: find a new
+ * area and force MREMAP_FIXED, provided moving is allowed. */
+ ret = -ENOMEM;
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out_sem;
+ new_addr = get_unmapped_area (NULL, addr, new_len, 0, 0);
+ if (!new_addr)
+ goto out_sem;
+ flags |= MREMAP_FIXED;
+ }
+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+out_sem:
+ up_write(&current->mm->mmap_sem);
+out:
+
+ PPCDBG(PPCDBG_SYS32, "sys32_mremap - exited - pid=%ld current=%lx comm=%s\n",
+ current->pid, current, current->comm);
+
+ return ret;
+}
+
+
+
+/* Handle adjtimex compatability. */
+/* 32-bit layout of struct timex; the trailing unnamed s32 bitfields
+ * reproduce the padding words reserved in the 64-bit structure. */
+struct timex32 {
+ u32 modes;
+ s32 offset, freq, maxerror, esterror;
+ s32 status, constant, precision, tolerance;
+ struct timeval32 time;
+ s32 tick;
+ s32 ppsfreq, jitter, shift, stabil;
+ s32 jitcnt, calcnt, errcnt, stbcnt;
+ s32 :32; s32 :32; s32 :32; s32 :32;
+ s32 :32; s32 :32; s32 :32; s32 :32;
+ s32 :32; s32 :32; s32 :32; s32 :32;
+};
+
+extern int do_adjtimex(struct timex *);
+extern void ppc_adjtimex(void);
+
+/*
+ * 32-bit adjtimex(2): widen the user's timex32 field-by-field into a
+ * kernel struct timex, run do_adjtimex(), nudge the PPC timebase
+ * conversion, and narrow every field back out (do_adjtimex updates
+ * the structure in place).  The leading get_user/put_user perform
+ * the access checks; the __get_user/__put_user chains that follow
+ * reuse them.  Returns -EFAULT on any user access failure.
+ */
+asmlinkage long sys32_adjtimex(struct timex32 *utp)
+{
+ struct timex txc;
+ int ret;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_adjtimex - running - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ memset(&txc, 0, sizeof(struct timex));
+
+ if(get_user(txc.modes, &utp->modes) ||
+ __get_user(txc.offset, &utp->offset) ||
+ __get_user(txc.freq, &utp->freq) ||
+ __get_user(txc.maxerror, &utp->maxerror) ||
+ __get_user(txc.esterror, &utp->esterror) ||
+ __get_user(txc.status, &utp->status) ||
+ __get_user(txc.constant, &utp->constant) ||
+ __get_user(txc.precision, &utp->precision) ||
+ __get_user(txc.tolerance, &utp->tolerance) ||
+ __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+ __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+ __get_user(txc.tick, &utp->tick) ||
+ __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+ __get_user(txc.jitter, &utp->jitter) ||
+ __get_user(txc.shift, &utp->shift) ||
+ __get_user(txc.stabil, &utp->stabil) ||
+ __get_user(txc.jitcnt, &utp->jitcnt) ||
+ __get_user(txc.calcnt, &utp->calcnt) ||
+ __get_user(txc.errcnt, &utp->errcnt) ||
+ __get_user(txc.stbcnt, &utp->stbcnt))
+ return -EFAULT;
+
+ ret = do_adjtimex(&txc);
+
+ /* adjust the conversion of TB to time of day to track adjtimex */
+ ppc_adjtimex();
+
+ if(put_user(txc.modes, &utp->modes) ||
+ __put_user(txc.offset, &utp->offset) ||
+ __put_user(txc.freq, &utp->freq) ||
+ __put_user(txc.maxerror, &utp->maxerror) ||
+ __put_user(txc.esterror, &utp->esterror) ||
+ __put_user(txc.status, &utp->status) ||
+ __put_user(txc.constant, &utp->constant) ||
+ __put_user(txc.precision, &utp->precision) ||
+ __put_user(txc.tolerance, &utp->tolerance) ||
+ __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+ __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+ __put_user(txc.tick, &utp->tick) ||
+ __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+ __put_user(txc.jitter, &utp->jitter) ||
+ __put_user(txc.shift, &utp->shift) ||
+ __put_user(txc.stabil, &utp->stabil) ||
+ __put_user(txc.jitcnt, &utp->jitcnt) ||
+ __put_user(txc.calcnt, &utp->calcnt) ||
+ __put_user(txc.errcnt, &utp->errcnt) ||
+ __put_user(txc.stbcnt, &utp->stbcnt))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+
+
+#ifdef CONFIG_MODULES
+
+extern asmlinkage unsigned long sys_create_module(const char *name_user, size_t size);
+
+/* 32-bit create_module(2): only the size argument needs widening. */
+asmlinkage unsigned long sys32_create_module(const char *name_user, __kernel_size_t32 size)
+{
+ unsigned long rc;
+
+ PPCDBG(PPCDBG_SYS32M, "sys32_create_module - running - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+ rc = sys_create_module(name_user, (size_t)size);
+ return rc;
+}
+
+
+
+extern asmlinkage long sys_init_module(const char *name_user, struct module *mod_user);
+
+/* 32-bit init_module(2): both arguments are pointers, handed straight
+ * through to the native implementation. */
+asmlinkage long sys32_init_module(const char *name_user, struct module *mod_user)
+{
+ long rc;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_init_module - running - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+ rc = sys_init_module(name_user, mod_user);
+ return rc;
+}
+
+
+
+extern asmlinkage long sys_delete_module(const char *name_user);
+
+/* 32-bit delete_module(2): the name pointer is handed straight
+ * through to the native implementation. */
+asmlinkage long sys32_delete_module(const char *name_user)
+{
+ long rc;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_delete_module - running - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+ rc = sys_delete_module(name_user);
+ return rc;
+}
+
+
+
+/* 32-bit layout of the QM_INFO reply: module address, size, flags and
+ * use count, all narrowed to 32 bits. */
+struct module_info32 {
+ u32 addr;
+ u32 size;
+ u32 flags;
+ s32 usecount;
+};
+
+/* Query various bits about modules. */
+
+/*
+ * Copy a user-supplied module name into a freshly allocated page.
+ * On success *buf points at the page (release with put_mod_name) and
+ * the string length is returned; otherwise -EFAULT, -ENOMEM,
+ * -ENAMETOOLONG or -EINVAL (empty name).
+ */
+static inline long
+get_mod_name(const char *user_name, char **buf)
+{
+ unsigned long page;
+ long retval;
+
+ /* Reject kernel-range pointers unless we are already running with
+ * a KERNEL_DS segment (in-kernel caller). */
+ if ((unsigned long)user_name >= TASK_SIZE
+ && !segment_eq(get_fs (), KERNEL_DS))
+ return -EFAULT;
+
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ retval = strncpy_from_user((char *)page, user_name, PAGE_SIZE);
+ if (retval > 0) {
+ if (retval < PAGE_SIZE) {
+ *buf = (char *)page;
+ return retval;
+ }
+ retval = -ENAMETOOLONG;
+ } else if (!retval)
+ retval = -EINVAL;
+
+ free_page(page);
+ return retval;
+}
+
+/* Release a name buffer obtained from get_mod_name(). */
+static inline void
+put_mod_name(char *buf)
+{
+ free_page((unsigned long)buf);
+}
+
+/* Walk the global module_list for a live (not MOD_DELETED) module
+ * with the given name; returns NULL when no such module exists. */
+static __inline__ struct module *find_module(const char *name)
+{
+ struct module *m = module_list;
+
+ while (m != NULL) {
+ if (!(m->flags & MOD_DELETED) && strcmp(m->name, name) == 0)
+ break;
+ m = m->next;
+ }
+
+ return m;
+}
+
+/*
+ * QM_MODULES: copy the NUL-terminated names of all loaded modules
+ * into buf and store the count through ret.  The list's final entry
+ * (mod->next == NULL) is skipped -- presumably the resident kernel
+ * pseudo-module, as in sys32_query_module; confirm.  If the buffer is
+ * too small, the total space needed is stored instead and -ENOSPC
+ * returned.
+ */
+static int
+qm_modules(char *buf, size_t bufsize, __kernel_size_t32 *ret)
+{
+ struct module *mod;
+ size_t nmod, space, len;
+
+ nmod = space = 0;
+
+ for (mod = module_list; mod->next != NULL; mod = mod->next, ++nmod) {
+ len = strlen(mod->name)+1;
+ if (len > bufsize)
+ goto calc_space_needed;
+ if (copy_to_user(buf, mod->name, len))
+ return -EFAULT;
+ buf += len;
+ bufsize -= len;
+ space += len;
+ }
+
+ if (put_user(nmod, ret))
+ return -EFAULT;
+ else
+ return 0;
+
+calc_space_needed:
+ /* len is the size of the name that did not fit; add the sizes of
+ * all remaining names to report the full requirement. */
+ space += len;
+ while ((mod = mod->next)->next != NULL)
+ space += strlen(mod->name)+1;
+
+ if (put_user(space, ret))
+ return -EFAULT;
+ else
+ return -ENOSPC;
+}
+
+/*
+ * QM_DEPS: copy the names of the modules mod depends on into buf and
+ * store the dependency count through ret.  -EINVAL for the kernel
+ * pseudo-module (list tail); if mod is not queryable the count is
+ * simply 0.  On overflow the required space is reported and -ENOSPC
+ * returned, as in qm_modules.
+ */
+static int
+qm_deps(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
+{
+ size_t i, space, len;
+
+ if (mod->next == NULL)
+ return -EINVAL;
+ if (!MOD_CAN_QUERY(mod))
+ return put_user(0, ret);
+
+ space = 0;
+ for (i = 0; i < mod->ndeps; ++i) {
+ const char *dep_name = mod->deps[i].dep->name;
+
+ len = strlen(dep_name)+1;
+ if (len > bufsize)
+ goto calc_space_needed;
+ if (copy_to_user(buf, dep_name, len))
+ return -EFAULT;
+ buf += len;
+ bufsize -= len;
+ space += len;
+ }
+
+ return put_user(i, ret);
+
+calc_space_needed:
+ space += len;
+ while (++i < mod->ndeps)
+ space += strlen(mod->deps[i].dep->name)+1;
+
+ if (put_user(space, ret))
+ return -EFAULT;
+ else
+ return -ENOSPC;
+}
+
+/*
+ * QM_REFS: copy the names of the modules that reference mod into buf
+ * and store the count through ret.  Same conventions as qm_deps:
+ * -EINVAL for the list tail, count 0 when mod is not queryable, and
+ * the required-space/-ENOSPC protocol on overflow.
+ */
+static int
+qm_refs(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
+{
+ size_t nrefs, space, len;
+ struct module_ref *ref;
+
+ if (mod->next == NULL)
+ return -EINVAL;
+ /* The else below binds to this inner if (dangling-else):
+ * unqueryable modules report a count of zero. */
+ if (!MOD_CAN_QUERY(mod))
+ if (put_user(0, ret))
+ return -EFAULT;
+ else
+ return 0;
+
+ space = 0;
+ for (nrefs = 0, ref = mod->refs; ref ; ++nrefs, ref = ref->next_ref) {
+ const char *ref_name = ref->ref->name;
+
+ len = strlen(ref_name)+1;
+ if (len > bufsize)
+ goto calc_space_needed;
+ if (copy_to_user(buf, ref_name, len))
+ return -EFAULT;
+ buf += len;
+ bufsize -= len;
+ space += len;
+ }
+
+ if (put_user(nrefs, ret))
+ return -EFAULT;
+ else
+ return 0;
+
+calc_space_needed:
+ space += len;
+ while ((ref = ref->next_ref) != NULL)
+ space += strlen(ref->ref->name)+1;
+
+ if (put_user(space, ret))
+ return -EFAULT;
+ else
+ return -ENOSPC;
+}
+
+/*
+ * QM_SYMBOLS: emit mod's exported symbols as an array of
+ * {value, name-offset} u32 pairs at the start of buf, followed by the
+ * NUL-terminated name strings; each pair's second word is the byte
+ * offset of its name from the start of buf.  Stores the symbol count
+ * through ret on success, or the required space with -ENOSPC on
+ * overflow.  An unqueryable module reports a count of zero.
+ */
+static inline int
+qm_symbols(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
+{
+ size_t i, space, len;
+ struct module_symbol *s;
+ char *strings;
+ unsigned *vals;
+
+ if (!MOD_CAN_QUERY(mod))
+ if (put_user(0, ret))
+ return -EFAULT;
+ else
+ return 0;
+
+ /* Space for the fixed-size pair table; string bytes follow it. */
+ space = mod->nsyms * 2*sizeof(u32);
+
+ i = len = 0;
+ s = mod->syms;
+
+ if (space > bufsize)
+ goto calc_space_needed;
+
+ if (!access_ok(VERIFY_WRITE, buf, space))
+ return -EFAULT;
+
+ bufsize -= space;
+ vals = (unsigned *)buf;
+ strings = buf+space;
+
+ for (; i < mod->nsyms ; ++i, ++s, vals += 2) {
+ len = strlen(s->name)+1;
+ if (len > bufsize)
+ goto calc_space_needed;
+
+ /* space doubles as the running offset of this name within
+ * buf, which is exactly what the pair's second word wants. */
+ if (copy_to_user(strings, s->name, len)
+ || __put_user(s->value, vals+0)
+ || __put_user(space, vals+1))
+ return -EFAULT;
+
+ strings += len;
+ bufsize -= len;
+ space += len;
+ }
+
+ if (put_user(i, ret))
+ return -EFAULT;
+ else
+ return 0;
+
+calc_space_needed:
+ for (; i < mod->nsyms; ++i, ++s)
+ space += strlen(s->name)+1;
+
+ if (put_user(space, ret))
+ return -EFAULT;
+ else
+ return -ENOSPC;
+}
+
+/*
+ * QM_INFO: fill a 32-bit module_info32 (address, size, flags, use
+ * count) for mod.  -EINVAL for the list tail; -ENOSPC when the
+ * buffer is too small, in which case only the required size is
+ * reported through ret.  A module with a can_unload hook reports a
+ * use count of -1.
+ */
+static inline int
+qm_info(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
+{
+ int error = 0;
+
+ if (mod->next == NULL)
+ return -EINVAL;
+
+ if (sizeof(struct module_info32) <= bufsize) {
+ struct module_info32 info;
+ info.addr = (unsigned long)mod;
+ info.size = mod->size;
+ info.flags = mod->flags;
+ info.usecount =
+ ((mod_member_present(mod, can_unload)
+ && mod->can_unload)
+ ? -1 : atomic_read(&mod->uc.usecount));
+
+ if (copy_to_user(buf, &info, sizeof(struct module_info32)))
+ return -EFAULT;
+ } else
+ error = -ENOSPC;
+
+ /* The required size is always reported, even on success. */
+ if (put_user(sizeof(struct module_info32), ret))
+ return -EFAULT;
+
+ return error;
+}
+
+/* Note: it is necessary to treat which as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/*
+ * 32-bit query_module(2): resolve the target module (NULL or empty
+ * name selects the resident kernel pseudo-module at the list tail),
+ * then dispatch to the qm_* helper for the requested query, passing
+ * the user's 32-bit size pointer through AA().  Runs under the BKL.
+ */
+asmlinkage long sys32_query_module(char *name_user, u32 which, char *buf, __kernel_size_t32 bufsize, u32 ret)
+{
+ struct module *mod;
+ int err;
+
+ PPCDBG(PPCDBG_SYS32M, "sys32_query_module - entered - pid=%ld current=%lx comm=%s\n",
+ current->pid, current, current->comm);
+
+ lock_kernel();
+ if (name_user == 0) {
+ /* This finds "kernel_module" which is not exported. */
+ for(mod = module_list; mod->next != NULL; mod = mod->next)
+ ;
+ } else {
+ long namelen;
+ char *name;
+
+ if ((namelen = get_mod_name(name_user, &name)) < 0) {
+ err = namelen;
+ goto out;
+ }
+ err = -ENOENT;
+ if (namelen == 0) {
+ /* This finds "kernel_module" which is not exported. */
+ for(mod = module_list; mod->next != NULL; mod = mod->next)
+ ;
+ } else if ((mod = find_module(name)) == NULL) {
+ put_mod_name(name);
+ goto out;
+ }
+ put_mod_name(name);
+ }
+
+ switch ((int)which)
+ {
+ case 0:
+ /* which == 0 is the "does this syscall exist" probe. */
+ err = 0;
+ break;
+ case QM_MODULES:
+ err = qm_modules(buf, bufsize, (__kernel_size_t32 *)AA(ret));
+ break;
+ case QM_DEPS:
+ err = qm_deps(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret));
+ break;
+ case QM_REFS:
+ err = qm_refs(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret));
+ break;
+ case QM_SYMBOLS:
+ err = qm_symbols(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret));
+ break;
+ case QM_INFO:
+ err = qm_info(mod, buf, bufsize, (__kernel_size_t32 *)AA(ret));
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+out:
+ unlock_kernel();
+
+ PPCDBG(PPCDBG_SYS32, "sys32_query_module - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+ return err;
+}
+
+
+
+/* 32-bit layout of struct kernel_sym: 32-bit value plus the same
+ * 60-byte name field. */
+struct kernel_sym32 {
+ u32 value;
+ char name[60];
+};
+
+extern asmlinkage long sys_get_kernel_syms(struct kernel_sym *table);
+
+/*
+ * 32-bit get_kernel_syms(2): the first call (table == NULL) just
+ * returns the symbol count; otherwise the full 64-bit table is
+ * fetched into a kernel buffer under set_fs(KERNEL_DS) and converted
+ * entry-by-entry into the user's 32-bit table.  Returns the number
+ * of entries copied (stopping early on the first fault).
+ *
+ * Bug fix: the original loop advanced the output pointer with
+ * "table += sizeof (struct kernel_sym32)".  Pointer arithmetic is
+ * already in units of elements, so that stepped 64 entries (4096
+ * bytes) per symbol, writing far beyond the area the caller sized
+ * for len entries.  Advance one entry per iteration instead.
+ */
+asmlinkage long sys32_get_kernel_syms(struct kernel_sym32 *table)
+{
+ int len, i;
+ struct kernel_sym *tbl;
+ mm_segment_t old_fs;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_get_kernel_syms - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+
+ len = sys_get_kernel_syms(NULL);
+ if (!table) return len;
+ tbl = kmalloc (len * sizeof (struct kernel_sym), GFP_KERNEL);
+ if (!tbl) return -ENOMEM;
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ sys_get_kernel_syms(tbl);
+ set_fs (old_fs);
+ for (i = 0; i < len; i++, table++) {
+ if (put_user (tbl[i].value, &table->value) ||
+ copy_to_user (table->name, tbl[i].name, 60))
+ break;
+ }
+ kfree (tbl);
+
+ PPCDBG(PPCDBG_SYS32, "sys32_get_kernel_syms - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ return i;
+}
+
+#else /* CONFIG_MODULES */
+
+/* CONFIG_MODULES is off: trace the attempt, then report "not implemented". */
+asmlinkage unsigned long sys32_create_module(const char *name_user, size_t size)
+{
+ PPCDBG(PPCDBG_SYS32, "sys32_create_module - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+ return -ENOSYS;
+}
+
+/* CONFIG_MODULES is off: trace the attempt, then report "not implemented". */
+asmlinkage long sys32_init_module(const char *name_user, struct module *mod_user)
+{
+ PPCDBG(PPCDBG_SYS32, "sys32_init_module - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+ return -ENOSYS;
+}
+
+/* CONFIG_MODULES is off: trace the attempt, then report "not implemented". */
+asmlinkage long sys32_delete_module(const char *name_user)
+{
+ PPCDBG(PPCDBG_SYS32, "sys32_delete_module - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+ return -ENOSYS;
+}
+
+/* "which" arrives as a 32-bit value in a 64-bit register; it is declared
+ * u32 and cast to int below so that the sign extension a 32-bit caller
+ * expects is performed explicitly.
+ */
+asmlinkage long sys32_query_module(const char *name_user, u32 which, char *buf, size_t bufsize, size_t *ret)
+{
+ PPCDBG(PPCDBG_SYS32, "sys32_query_module - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+ /* Let the program know about the new interface. Not that it'll do them much good. */
+ if ((int)which == 0)
+ return 0;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_query_module - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+ return -ENOSYS;
+}
+
+/* CONFIG_MODULES is off: trace the attempt, then report "not implemented". */
+asmlinkage long sys32_get_kernel_syms(struct kernel_sym *table)
+{
+ PPCDBG(PPCDBG_SYS32, "sys32_get_kernel_syms - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_MODULES */
+
+
+
+/* Stuff for NFS server syscalls... */
+/* 32-bit mirrors of the nfsctl_* argument structures: every pointer,
+ * long and time_t shrinks to 32 bits so the layouts match what a
+ * 32-bit nfsservctl(2) caller hands us. */
+struct nfsctl_svc32 {
+ u16 svc32_port;
+ s32 svc32_nthreads;
+};
+
+struct nfsctl_client32 {
+ s8 cl32_ident[NFSCLNT_IDMAX+1];
+ s32 cl32_naddr;
+ struct in_addr cl32_addrlist[NFSCLNT_ADDRMAX];
+ s32 cl32_fhkeytype;
+ s32 cl32_fhkeylen;
+ u8 cl32_fhkey[NFSCLNT_KEYMAX];
+};
+
+struct nfsctl_export32 {
+ s8 ex32_client[NFSCLNT_IDMAX+1];
+ s8 ex32_path[NFS_MAXPATHLEN+1];
+ __kernel_dev_t32 ex32_dev;
+ __kernel_ino_t32 ex32_ino;
+ s32 ex32_flags;
+ __kernel_uid_t32 ex32_anon_uid;
+ __kernel_gid_t32 ex32_anon_gid;
+};
+
+struct nfsctl_uidmap32 {
+ u32 ug32_ident; /* char * */
+ __kernel_uid_t32 ug32_uidbase;
+ s32 ug32_uidlen;
+ u32 ug32_udimap; /* uid_t * */
+ __kernel_uid_t32 ug32_gidbase;
+ s32 ug32_gidlen;
+ u32 ug32_gdimap; /* gid_t * */
+};
+
+struct nfsctl_fhparm32 {
+ struct sockaddr gf32_addr;
+ __kernel_dev_t32 gf32_dev;
+ __kernel_ino_t32 gf32_ino;
+ s32 gf32_version;
+};
+
+struct nfsctl_fdparm32 {
+ struct sockaddr gd32_addr;
+ s8 gd32_path[NFS_MAXPATHLEN+1];
+ s32 gd32_version;
+};
+
+struct nfsctl_fsparm32 {
+ struct sockaddr gd32_addr;
+ s8 gd32_path[NFS_MAXPATHLEN+1];
+ s32 gd32_maxlen;
+};
+
+struct nfsctl_arg32 {
+ s32 ca32_version; /* safeguard */
+ union {
+ struct nfsctl_svc32 u32_svc;
+ struct nfsctl_client32 u32_client;
+ struct nfsctl_export32 u32_export;
+ struct nfsctl_uidmap32 u32_umap;
+ struct nfsctl_fhparm32 u32_getfh;
+ struct nfsctl_fdparm32 u32_getfd;
+ struct nfsctl_fsparm32 u32_getfs;
+ } u;
+#define ca32_svc u.u32_svc
+#define ca32_client u.u32_client
+#define ca32_export u.u32_export
+#define ca32_umap u.u32_umap
+#define ca32_getfh u.u32_getfh
+#define ca32_getfd u.u32_getfd
+#define ca32_getfs u.u32_getfs
+/* NOTE(review): there is no u32_authd union member, so this accessor can
+ * never compile if used; it appears to be dead — confirm before removal. */
+#define ca32_authd u.u32_authd
+};
+
+union nfsctl_res32 {
+ __u8 cr32_getfh[NFS_FHSIZE];
+ struct knfsd_fh cr32_getfs;
+};
+
+/* Copy a 32-bit NFSCTL_SVC argument block into the 64-bit karg.
+ * NOTE(review): the first access uses __get_user with no prior
+ * access_ok() on arg32 — presumably the caller's address range is
+ * trusted; confirm. */
+static int nfs_svc32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ int err;
+
+ err = __get_user(karg->ca_version, &arg32->ca32_version);
+ err |= __get_user(karg->ca_svc.svc_port, &arg32->ca32_svc.svc32_port);
+ err |= __get_user(karg->ca_svc.svc_nthreads, &arg32->ca32_svc.svc32_nthreads);
+ return err;
+}
+
+/* Copy a 32-bit NFSCTL_ADDCLIENT/DELCLIENT argument block into karg.
+ * Ident string, address list and fh key are fixed-size byte copies;
+ * the scalar fields are fetched individually.  Returns 0 or -EFAULT. */
+static int nfs_clnt32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ int err;
+
+ err = __get_user(karg->ca_version, &arg32->ca32_version);
+ err |= copy_from_user(&karg->ca_client.cl_ident[0],
+ &arg32->ca32_client.cl32_ident[0],
+ NFSCLNT_IDMAX);
+ err |= __get_user(karg->ca_client.cl_naddr, &arg32->ca32_client.cl32_naddr);
+ err |= copy_from_user(&karg->ca_client.cl_addrlist[0],
+ &arg32->ca32_client.cl32_addrlist[0],
+ (sizeof(struct in_addr) * NFSCLNT_ADDRMAX));
+ err |= __get_user(karg->ca_client.cl_fhkeytype,
+ &arg32->ca32_client.cl32_fhkeytype);
+ err |= __get_user(karg->ca_client.cl_fhkeylen,
+ &arg32->ca32_client.cl32_fhkeylen);
+ err |= copy_from_user(&karg->ca_client.cl_fhkey[0],
+ &arg32->ca32_client.cl32_fhkey[0],
+ NFSCLNT_KEYMAX);
+
+ if(err) return -EFAULT;
+ return 0;
+}
+
+/* Copy a 32-bit NFSCTL_EXPORT/UNEXPORT argument block into karg:
+ * client name and path as byte copies, the remaining fields widened
+ * through __get_user.  Returns 0 or -EFAULT.
+ * (Two no-op self-assignments of ex_anon_uid/ex_anon_gid that followed
+ * the copies were dead code and have been removed.) */
+static int nfs_exp32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ int err;
+
+ err = __get_user(karg->ca_version, &arg32->ca32_version);
+ err |= copy_from_user(&karg->ca_export.ex_client[0],
+ &arg32->ca32_export.ex32_client[0],
+ NFSCLNT_IDMAX);
+ err |= copy_from_user(&karg->ca_export.ex_path[0],
+ &arg32->ca32_export.ex32_path[0],
+ NFS_MAXPATHLEN);
+ err |= __get_user(karg->ca_export.ex_dev,
+ &arg32->ca32_export.ex32_dev);
+ err |= __get_user(karg->ca_export.ex_ino,
+ &arg32->ca32_export.ex32_ino);
+ err |= __get_user(karg->ca_export.ex_flags,
+ &arg32->ca32_export.ex32_flags);
+ err |= __get_user(karg->ca_export.ex_anon_uid,
+ &arg32->ca32_export.ex32_anon_uid);
+ err |= __get_user(karg->ca_export.ex_anon_gid,
+ &arg32->ca32_export.ex32_anon_gid);
+
+ if(err) return -EFAULT;
+ return 0;
+}
+
+/* Copy a 32-bit NFSCTL_UGIDUPDATE argument block into karg, allocating
+ * kernel buffers for the ident string and the uid/gid maps.  The caller
+ * (sys32_nfsservctl) owns and frees those buffers, including on the
+ * error paths here.
+ * Fix: the gid map length was read into ug_uidlen, clobbering the uid
+ * length and leaving ug_gidlen zero (so the gid copy loop never ran);
+ * it now goes into ug_gidlen, and the gid map allocation is sized by
+ * ug_gidlen as intended. */
+static int nfs_uud32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ u32 uaddr;
+ int i;
+ int err;
+
+ memset(karg, 0, sizeof(*karg));
+ if(__get_user(karg->ca_version, &arg32->ca32_version))
+ return -EFAULT;
+ karg->ca_umap.ug_ident = (char *)get_free_page(GFP_USER);
+ if(!karg->ca_umap.ug_ident)
+ return -ENOMEM;
+ err = __get_user(uaddr, &arg32->ca32_umap.ug32_ident);
+ if(strncpy_from_user(karg->ca_umap.ug_ident,
+ (char *)A(uaddr), PAGE_SIZE) <= 0)
+ return -EFAULT;
+ err |= __get_user(karg->ca_umap.ug_uidbase,
+ &arg32->ca32_umap.ug32_uidbase);
+ err |= __get_user(karg->ca_umap.ug_uidlen,
+ &arg32->ca32_umap.ug32_uidlen);
+ err |= __get_user(uaddr, &arg32->ca32_umap.ug32_udimap);
+ if (err)
+ return -EFAULT;
+ karg->ca_umap.ug_udimap = kmalloc((sizeof(uid_t) * karg->ca_umap.ug_uidlen),
+ GFP_USER);
+ if(!karg->ca_umap.ug_udimap)
+ return -ENOMEM;
+ for(i = 0; i < karg->ca_umap.ug_uidlen; i++)
+ err |= __get_user(karg->ca_umap.ug_udimap[i],
+ &(((__kernel_uid_t32 *)A(uaddr))[i]));
+ err |= __get_user(karg->ca_umap.ug_gidbase,
+ &arg32->ca32_umap.ug32_gidbase);
+ /* was ug_uidlen: must read the gid length into ug_gidlen */
+ err |= __get_user(karg->ca_umap.ug_gidlen,
+ &arg32->ca32_umap.ug32_gidlen);
+ err |= __get_user(uaddr, &arg32->ca32_umap.ug32_gdimap);
+ if (err)
+ return -EFAULT;
+ karg->ca_umap.ug_gdimap = kmalloc((sizeof(gid_t) * karg->ca_umap.ug_gidlen),
+ GFP_USER);
+ if(!karg->ca_umap.ug_gdimap)
+ return -ENOMEM;
+ for(i = 0; i < karg->ca_umap.ug_gidlen; i++)
+ err |= __get_user(karg->ca_umap.ug_gdimap[i],
+ &(((__kernel_gid_t32 *)A(uaddr))[i]));
+
+ return err;
+}
+
+/* Copy a 32-bit NFSCTL_GETFH argument block into karg.
+ * The sockaddr is layout-identical and byte-copied; dev/ino/version
+ * are widened through __get_user.  Returns 0 or -EFAULT. */
+static int nfs_getfh32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ int err;
+
+ err = __get_user(karg->ca_version, &arg32->ca32_version);
+ err |= copy_from_user(&karg->ca_getfh.gf_addr,
+ &arg32->ca32_getfh.gf32_addr,
+ (sizeof(struct sockaddr)));
+ err |= __get_user(karg->ca_getfh.gf_dev,
+ &arg32->ca32_getfh.gf32_dev);
+ err |= __get_user(karg->ca_getfh.gf_ino,
+ &arg32->ca32_getfh.gf32_ino);
+ err |= __get_user(karg->ca_getfh.gf_version,
+ &arg32->ca32_getfh.gf32_version);
+
+ if(err) return -EFAULT;
+ return 0;
+}
+
+/* Copy a 32-bit NFSCTL_GETFD argument block into karg: sockaddr and
+ * path as byte copies, version widened.  Returns 0 or -EFAULT. */
+static int nfs_getfd32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ int err;
+
+ err = __get_user(karg->ca_version, &arg32->ca32_version);
+ err |= copy_from_user(&karg->ca_getfd.gd_addr,
+ &arg32->ca32_getfd.gd32_addr,
+ (sizeof(struct sockaddr)));
+ err |= copy_from_user(&karg->ca_getfd.gd_path,
+ &arg32->ca32_getfd.gd32_path,
+ (NFS_MAXPATHLEN+1));
+ err |= __get_user(karg->ca_getfd.gd_version,
+ &arg32->ca32_getfd.gd32_version);
+
+ if(err) return -EFAULT;
+ return 0;
+}
+
+/* Copy a 32-bit NFSCTL_GETFS argument block into karg: sockaddr and
+ * path as byte copies, maxlen widened.  Returns 0 or -EFAULT. */
+static int nfs_getfs32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
+{
+ int err;
+
+ err = __get_user(karg->ca_version, &arg32->ca32_version);
+ err |= copy_from_user(&karg->ca_getfs.gd_addr,
+ &arg32->ca32_getfs.gd32_addr,
+ (sizeof(struct sockaddr)));
+ err |= copy_from_user(&karg->ca_getfs.gd_path,
+ &arg32->ca32_getfs.gd32_path,
+ (NFS_MAXPATHLEN+1));
+ err |= __get_user(karg->ca_getfs.gd_maxlen,
+ &arg32->ca32_getfs.gd32_maxlen);
+
+ if(err) return -EFAULT;
+ return 0;
+}
+
+/* The result union is opaque file-handle data with identical layout in
+ * both ABIs, so a straight byte copy back to userland is sufficient. */
+static int nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32)
+{
+ if (copy_to_user(res32, kres, sizeof(*res32)))
+ return -EFAULT;
+ return 0;
+}
+
+/* Note: it is necessary to treat cmd_parm as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit nfsservctl(2): translate the 32-bit argument block into the
+ * native layout, invoke sys_nfsservctl under KERNEL_DS, and translate
+ * any GETFH/GETFD/GETFS result back.  All kernel buffers allocated by
+ * the translators are released here, on success and on error.
+ * Fix: ug_ident is allocated with get_free_page() in nfs_uud32_trans,
+ * so it must be released with free_page(), not kfree() — the two
+ * allocators are not interchangeable. */
+int asmlinkage sys32_nfsservctl(u32 cmd_parm, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32)
+{
+ int cmd = (int)cmd_parm;
+ struct nfsctl_arg *karg = NULL;
+ union nfsctl_res *kres = NULL;
+ mm_segment_t oldfs;
+ int err;
+
+ karg = kmalloc(sizeof(*karg), GFP_USER);
+ if(!karg)
+ return -ENOMEM;
+ if(res32) {
+ kres = kmalloc(sizeof(*kres), GFP_USER);
+ if(!kres) {
+ kfree(karg);
+ return -ENOMEM;
+ }
+ }
+ switch(cmd) {
+ case NFSCTL_SVC:
+ err = nfs_svc32_trans(karg, arg32);
+ break;
+ case NFSCTL_ADDCLIENT:
+ err = nfs_clnt32_trans(karg, arg32);
+ break;
+ case NFSCTL_DELCLIENT:
+ err = nfs_clnt32_trans(karg, arg32);
+ break;
+ case NFSCTL_EXPORT:
+ case NFSCTL_UNEXPORT:
+ err = nfs_exp32_trans(karg, arg32);
+ break;
+ /* This one is unimplemented, be we're ready for it. */
+ case NFSCTL_UGIDUPDATE:
+ err = nfs_uud32_trans(karg, arg32);
+ break;
+ case NFSCTL_GETFH:
+ err = nfs_getfh32_trans(karg, arg32);
+ break;
+ case NFSCTL_GETFD:
+ err = nfs_getfd32_trans(karg, arg32);
+ break;
+ case NFSCTL_GETFS:
+ err = nfs_getfs32_trans(karg, arg32);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if(err)
+ goto done;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_nfsservctl(cmd, karg, kres);
+ set_fs(oldfs);
+
+ if (err)
+ goto done;
+
+ if((cmd == NFSCTL_GETFH) ||
+ (cmd == NFSCTL_GETFD) ||
+ (cmd == NFSCTL_GETFS))
+ err = nfs_getfh32_res_trans(kres, res32);
+
+done:
+ if(karg) {
+ if(cmd == NFSCTL_UGIDUPDATE) {
+ if(karg->ca_umap.ug_ident)
+ /* page allocation — pair with free_page, not kfree */
+ free_page((unsigned long)karg->ca_umap.ug_ident);
+ if(karg->ca_umap.ug_udimap)
+ kfree(karg->ca_umap.ug_udimap);
+ if(karg->ca_umap.ug_gdimap)
+ kfree(karg->ca_umap.ug_gdimap);
+ }
+ kfree(karg);
+ }
+ if(kres)
+ kfree(kres);
+ return err;
+}
+
+
+
+/* 32-bit timespec: both fields shrink from long to s32. */
+struct timespec32 {
+ s32 tv_sec;
+ s32 tv_nsec;
+};
+
+extern asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp);
+
+/* 32-bit nanosleep(2): widen the request into a kernel timespec, call
+ * sys_nanosleep under KERNEL_DS (t doubles as the remaining-time buffer),
+ * and on interruption write the remainder back in 32-bit form. */
+asmlinkage long sys32_nanosleep(struct timespec32 *rqtp, struct timespec32 *rmtp)
+{
+ struct timespec t;
+ int ret;
+ mm_segment_t old_fs = get_fs ();
+
+ PPCDBG(PPCDBG_SYS32NI, "sys32_nanosleep - running - pid=%ld, comm=%s \n", current->pid, current->comm);
+
+ if (get_user (t.tv_sec, &rqtp->tv_sec) ||
+ __get_user (t.tv_nsec, &rqtp->tv_nsec))
+ return -EFAULT;
+ set_fs (KERNEL_DS);
+ ret = sys_nanosleep(&t, rmtp ? &t : NULL);
+ set_fs (old_fs);
+ /* Only -EINTR leaves a meaningful remainder to report. */
+ if (rmtp && ret == -EINTR) {
+ if (__put_user (t.tv_sec, &rmtp->tv_sec) ||
+ __put_user (t.tv_nsec, &rmtp->tv_nsec))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+
+
+
+/* These are here just in case some old sparc32 binary calls it. */
+/* pause(2): sleep interruptibly until a signal arrives, then let the
+ * signal machinery see -ERESTARTNOHAND (never restarted). */
+asmlinkage long sys32_pause(void)
+{
+ PPCDBG(PPCDBG_SYS32, "sys32_pause - running - pid=%ld, comm=%s \n", current->pid, current->comm);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
+
+
+
+/* Fetch a 32-bit itimerval from userspace into the kernel itimerval *o.
+ * One access_ok() check up front, then unchecked __get_user for each
+ * field; non-zero return means fault. */
+static inline long get_it32(struct itimerval *o, struct itimerval32 *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
+ __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
+ __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
+ __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
+}
+
+/* Store a kernel itimerval *i into the user's 32-bit itimerval *o.
+ * Mirror of get_it32(); non-zero return means fault. */
+static inline long put_it32(struct itimerval32 *o, struct itimerval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
+ __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
+ __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
+ __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
+}
+
+/* Fetch a 32-bit timeval from userspace into the kernel timeval *o.
+ * Non-zero return means fault. */
+static inline long get_tv32(struct timeval *o, struct timeval32 *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->tv_sec, &i->tv_sec) |
+ __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+/* Store a kernel timeval *i into the user's 32-bit timeval *o.
+ * Non-zero return means fault. */
+static inline long put_tv32(struct timeval32 *o, struct timeval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->tv_sec, &o->tv_sec) |
+ __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+
+
+
+extern int do_getitimer(int which, struct itimerval *value);
+
+/* Note: it is necessary to treat which as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit getitimer(2): read the timer via do_getitimer, then narrow the
+ * result into the user's 32-bit itimerval. */
+asmlinkage long sys32_getitimer(u32 which, struct itimerval32 *it)
+{
+ struct itimerval kit;
+ int error;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_getitimer - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ error = do_getitimer((int)which, &kit);
+ if (!error && put_it32(it, &kit))
+ error = -EFAULT;
+
+
+ PPCDBG(PPCDBG_SYS32, "sys32_getitimer - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+ return error;
+}
+
+
+
+extern int do_setitimer(int which, struct itimerval *, struct itimerval *);
+
+/* Note: it is necessary to treat which as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit setitimer(2): widen the new value (NULL means zeroed), call
+ * do_setitimer, then narrow the previous value back if requested.
+ * Error paths return before the "exited" debug trace fires. */
+asmlinkage long sys32_setitimer(u32 which, struct itimerval32 *in, struct itimerval32 *out)
+{
+ struct itimerval kin, kout;
+ int error;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_setitimer - entered - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+ if (in) {
+ if (get_it32(&kin, in))
+ return -EFAULT;
+ } else
+ memset(&kin, 0, sizeof(kin));
+
+ error = do_setitimer((int)which, &kin, out ? &kout : NULL);
+ if (error || !out)
+ return error;
+ if (put_it32(out, &kout))
+ return -EFAULT;
+
+
+ PPCDBG(PPCDBG_SYS32, "sys32_setitimer - exited - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+ return 0;
+}
+
+#define RLIM_INFINITY32 0xffffffff
+/* Clamp a 64-bit rlimit value to the 32-bit "infinity".  The argument
+ * is parenthesized so the macro stays correct when handed a compound
+ * expression (the old form expanded `x` bare). */
+#define RESOURCE32(x) (((x) > RLIM_INFINITY32) ? RLIM_INFINITY32 : (x))
+
+/* 32-bit rlimit: both members shrink from unsigned long to u32. */
+struct rlimit32 {
+ u32 rlim_cur;
+ u32 rlim_max;
+};
+
+extern asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim);
+/* 32-bit getrlimit(2): fetch the native limits under KERNEL_DS, then
+ * store them clamped to RLIM_INFINITY32 into the user's rlimit32. */
+asmlinkage long sys32_getrlimit(unsigned int resource, struct rlimit32 *rlim)
+{
+ struct rlimit r;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_getrlimit(resource, &r);
+ set_fs(old_fs);
+ if (!ret) {
+ ret = put_user(RESOURCE32(r.rlim_cur), &rlim->rlim_cur);
+ ret |= __put_user(RESOURCE32(r.rlim_max), &rlim->rlim_max);
+ }
+
+ return ret;
+}
+
+/* Back compatibility for getrlimit. Needed for some apps. */
+/* Reads current->rlim directly (no sys_getrlimit round trip), clamps
+ * each value to the 32-bit infinity, and copies the pair out. */
+asmlinkage long sys32_old_getrlimit(unsigned int resource, struct rlimit32* rlim)
+{
+ struct rlimit lim64; // 64-bit version of the resource limits.
+ struct rlimit32 lim32; // 32-bit version of the resource limits.
+ long rc;
+
+ if (resource >= RLIM_NLIMITS) {
+ PPCDBG(PPCDBG_SYS32, "sys32_old_getrlimit - specified resource is too large (%x) - pid=%ld, comm=%s\n", resource, current->pid, current->comm);
+ return -EINVAL;
+ }
+
+ memcpy(&lim64, current->rlim+resource, sizeof(struct rlimit));
+
+ lim32.rlim_cur = (lim64.rlim_cur > RLIM_INFINITY32)
+ ? RLIM_INFINITY32 : lim64.rlim_cur;
+ lim32.rlim_max = (lim64.rlim_max > RLIM_INFINITY32)
+ ? RLIM_INFINITY32 : lim64.rlim_max;
+
+ rc = copy_to_user(rlim, &lim32, sizeof(lim32)) ? -EFAULT : 0;
+ if (rc == 0) {
+ PPCDBG(PPCDBG_SYS32, "sys32_old_getrlimit - current=%x, maximum=%x - pid=%ld, comm=%s\n", lim32.rlim_cur, lim32.rlim_max, current->pid, current->comm);
+ } else {
+ PPCDBG(PPCDBG_SYS32, "sys32_old_getrlimit - unable to copy into user's storage - pid=%ld, comm=%s\n", current->pid, current->comm);
+ }
+ return rc;
+}
+
+extern asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit *rlim);
+/* 32-bit setrlimit(2): widen the limits (mapping the 32-bit infinity
+ * to RLIM_INFINITY) and call sys_setrlimit under KERNEL_DS. */
+asmlinkage long sys32_setrlimit(unsigned int resource, struct rlimit32 *rlim)
+{
+ struct rlimit r;
+ long ret;
+ mm_segment_t old_fs = get_fs ();
+
+ PPCDBG(PPCDBG_SYS32, "sys32_setrlimit - entered - resource=%x, rlim=%p - pid=%ld, comm=%s\n", resource, rlim, current->pid, current->comm);
+
+ if (resource >= RLIM_NLIMITS) return -EINVAL;
+ if (get_user (r.rlim_cur, &rlim->rlim_cur) ||
+ __get_user (r.rlim_max, &rlim->rlim_max))
+ return -EFAULT;
+ /* >= so 0xffffffff itself also maps to the 64-bit infinity. */
+ if (r.rlim_cur >= RLIM_INFINITY32)
+ r.rlim_cur = RLIM_INFINITY;
+ if (r.rlim_max >= RLIM_INFINITY32)
+ r.rlim_max = RLIM_INFINITY;
+ set_fs (KERNEL_DS);
+ ret = sys_setrlimit(resource, &r);
+ set_fs (old_fs);
+
+ PPCDBG(PPCDBG_SYS32, "sys32_setrlimit - exited w/ ret=%x - pid=%ld, comm=%s\n", ret, current->pid, current->comm);
+ return ret;
+}
+
+
+/* 32-bit rusage: timevals use the 32-bit layout and every long
+ * counter shrinks to s32. */
+struct rusage32 {
+ struct timeval32 ru_utime;
+ struct timeval32 ru_stime;
+ s32 ru_maxrss;
+ s32 ru_ixrss;
+ s32 ru_idrss;
+ s32 ru_isrss;
+ s32 ru_minflt;
+ s32 ru_majflt;
+ s32 ru_nswap;
+ s32 ru_inblock;
+ s32 ru_oublock;
+ s32 ru_msgsnd;
+ s32 ru_msgrcv;
+ s32 ru_nsignals;
+ s32 ru_nvcsw;
+ s32 ru_nivcsw;
+};
+
+/* Narrow a kernel rusage into the user's rusage32, field by field.
+ * Non-zero return means at least one store faulted. */
+static int put_rusage (struct rusage32 *ru, struct rusage *r)
+{
+ int err;
+
+ err = put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
+ err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
+ err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
+ err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
+ err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
+ err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
+ err |= __put_user (r->ru_idrss, &ru->ru_idrss);
+ err |= __put_user (r->ru_isrss, &ru->ru_isrss);
+ err |= __put_user (r->ru_minflt, &ru->ru_minflt);
+ err |= __put_user (r->ru_majflt, &ru->ru_majflt);
+ err |= __put_user (r->ru_nswap, &ru->ru_nswap);
+ err |= __put_user (r->ru_inblock, &ru->ru_inblock);
+ err |= __put_user (r->ru_oublock, &ru->ru_oublock);
+ err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
+ err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
+ err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
+ err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
+ err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
+ return err;
+}
+
+
+extern asmlinkage long sys_getrusage(int who, struct rusage *ru);
+
+/* Note: it is necessary to treat who as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit getrusage(2): collect the native rusage under KERNEL_DS, then
+ * narrow it into the user's rusage32.
+ * Fix: the copy-out now happens only when sys_getrusage succeeded; the
+ * old code copied the uninitialized local rusage to userspace on every
+ * error path. */
+asmlinkage long sys32_getrusage(u32 who, struct rusage32 *ru)
+{
+ struct rusage r;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_getrusage - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+
+ set_fs (KERNEL_DS);
+ ret = sys_getrusage((int)who, &r);
+ set_fs (old_fs);
+ if (!ret && put_rusage (ru, &r))
+ return -EFAULT;
+
+ return ret;
+}
+
+
+
+
+/* 32-bit sysinfo: longs shrink to 32 bits; _f pads the struct to the
+ * same 64-byte total a 32-bit kernel's struct sysinfo has. */
+struct sysinfo32 {
+ s32 uptime;
+ u32 loads[3];
+ u32 totalram;
+ u32 freeram;
+ u32 sharedram;
+ u32 bufferram;
+ u32 totalswap;
+ u32 freeswap;
+ unsigned short procs;
+ char _f[22];
+};
+
+extern asmlinkage long sys_sysinfo(struct sysinfo *info);
+
+/* 32-bit sysinfo(2): fill a native struct sysinfo under KERNEL_DS,
+ * then narrow each field into the user's sysinfo32. */
+asmlinkage long sys32_sysinfo(struct sysinfo32 *info)
+{
+ struct sysinfo s;
+ int ret, err;
+ mm_segment_t old_fs = get_fs ();
+
+ PPCDBG(PPCDBG_SYS32, "sys32_sysinfo - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ set_fs (KERNEL_DS);
+ ret = sys_sysinfo(&s);
+ set_fs (old_fs);
+ err = put_user (s.uptime, &info->uptime);
+ err |= __put_user (s.loads[0], &info->loads[0]);
+ err |= __put_user (s.loads[1], &info->loads[1]);
+ err |= __put_user (s.loads[2], &info->loads[2]);
+ err |= __put_user (s.totalram, &info->totalram);
+ err |= __put_user (s.freeram, &info->freeram);
+ err |= __put_user (s.sharedram, &info->sharedram);
+ err |= __put_user (s.bufferram, &info->bufferram);
+ err |= __put_user (s.totalswap, &info->totalswap);
+ err |= __put_user (s.freeswap, &info->freeswap);
+ err |= __put_user (s.procs, &info->procs);
+ if (err)
+ return -EFAULT;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_sysinfo - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ return ret;
+}
+
+
+
+
+/* Translations due to time_t size differences. Which affects all
+   sorts of things, like timeval and itimerval. */
+extern struct timezone sys_tz;
+extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
+
+/* 32-bit gettimeofday(2): read the clock into a kernel timeval and
+ * narrow it out; the timezone struct is identical in both ABIs and is
+ * copied verbatim.  Either pointer may be NULL. */
+asmlinkage long sys32_gettimeofday(struct timeval32 *tv, struct timezone *tz)
+{
+ struct timeval now;
+
+ PPCDBG(PPCDBG_SYS32X, "sys32_gettimeofday - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+
+ if (tv) {
+ do_gettimeofday(&now);
+ if (put_tv32(tv, &now))
+ return -EFAULT;
+ }
+ if (tz && copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ return 0;
+}
+
+
+
+/* 32-bit settimeofday(2): widen the timeval, copy the (ABI-identical)
+ * timezone, and hand both to do_sys_settimeofday.  A NULL argument is
+ * passed through as NULL so "leave unchanged" semantics are kept. */
+asmlinkage long sys32_settimeofday(struct timeval32 *tv, struct timezone *tz)
+{
+ struct timeval new_tv;
+ struct timezone new_tz;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_settimeofday - running - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ if (tv && get_tv32(&new_tv, tv))
+ return -EFAULT;
+ if (tz && copy_from_user(&new_tz, tz, sizeof(new_tz)))
+ return -EFAULT;
+
+ return do_sys_settimeofday(tv ? &new_tv : NULL, tz ? &new_tz : NULL);
+}
+
+
+
+
+/* 32-bit tms: all four clock_t fields use the 32-bit clock type. */
+struct tms32 {
+ __kernel_clock_t32 tms_utime;
+ __kernel_clock_t32 tms_stime;
+ __kernel_clock_t32 tms_cutime;
+ __kernel_clock_t32 tms_cstime;
+};
+
+extern asmlinkage long sys_times(struct tms * tbuf);
+
+/* 32-bit times(2): run sys_times under KERNEL_DS into a kernel tms,
+ * then narrow the four fields into the user's tms32.  A NULL tbuf is
+ * forwarded as NULL (return value only). */
+asmlinkage long sys32_times(struct tms32 *tbuf)
+{
+ struct tms t;
+ long ret;
+ mm_segment_t old_fs = get_fs ();
+ int err;
+
+ PPCDBG(PPCDBG_SYS32, "sys32_times - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ set_fs (KERNEL_DS);
+ ret = sys_times(tbuf ? &t : NULL);
+ set_fs (old_fs);
+ if (tbuf) {
+ err = put_user (t.tms_utime, &tbuf->tms_utime);
+ err |= __put_user (t.tms_stime, &tbuf->tms_stime);
+ err |= __put_user (t.tms_cutime, &tbuf->tms_cutime);
+ err |= __put_user (t.tms_cstime, &tbuf->tms_cstime);
+ if (err)
+ ret = -EFAULT;
+ }
+
+ PPCDBG(PPCDBG_SYS32, "sys32_times - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+ return ret;
+}
+
+/* 32-bit mirrors of the SysV IPC structures.  The *_ds32 forms match
+ * the old (pre-IPC_64) 32-bit ABI; the *64_ds32 forms match the IPC_64
+ * ABI, whose padding words sit around each 32-bit time_t. */
+struct msgbuf32 { s32 mtype; char mtext[1]; };
+
+struct semid_ds32 {
+ struct ipc_perm sem_perm;
+ __kernel_time_t32 sem_otime;
+ __kernel_time_t32 sem_ctime;
+ u32 sem_base;
+ u32 sem_pending;
+ u32 sem_pending_last;
+ u32 undo;
+ unsigned short sem_nsems;
+};
+
+struct semid64_ds32 {
+ struct ipc64_perm sem_perm;
+ unsigned int __unused1;
+ __kernel_time_t32 sem_otime;
+ unsigned int __unused2;
+ __kernel_time_t32 sem_ctime;
+ u32 sem_nsems;
+ u32 __unused3;
+ u32 __unused4;
+};
+
+struct msqid_ds32
+{
+ struct ipc_perm msg_perm;
+ u32 msg_first;
+ u32 msg_last;
+ __kernel_time_t32 msg_stime;
+ __kernel_time_t32 msg_rtime;
+ __kernel_time_t32 msg_ctime;
+ u32 msg_lcbytes;
+ u32 msg_lqbytes;
+ unsigned short msg_cbytes;
+ unsigned short msg_qnum;
+ unsigned short msg_qbytes;
+ __kernel_ipc_pid_t32 msg_lspid;
+ __kernel_ipc_pid_t32 msg_lrpid;
+};
+
+struct msqid64_ds32 {
+ struct ipc64_perm msg_perm;
+ unsigned int __unused1;
+ __kernel_time_t32 msg_stime;
+ unsigned int __unused2;
+ __kernel_time_t32 msg_rtime;
+ unsigned int __unused3;
+ __kernel_time_t32 msg_ctime;
+ unsigned int msg_cbytes;
+ unsigned int msg_qnum;
+ unsigned int msg_qbytes;
+ __kernel_pid_t32 msg_lspid;
+ __kernel_pid_t32 msg_lrpid;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+struct shmid_ds32 {
+ struct ipc_perm shm_perm;
+ int shm_segsz;
+ __kernel_time_t32 shm_atime;
+ __kernel_time_t32 shm_dtime;
+ __kernel_time_t32 shm_ctime;
+ __kernel_ipc_pid_t32 shm_cpid;
+ __kernel_ipc_pid_t32 shm_lpid;
+ unsigned short shm_nattch;
+ unsigned short __unused;
+ unsigned int __unused2;
+ unsigned int __unused3;
+};
+
+struct shmid64_ds32 {
+ struct ipc64_perm shm_perm;
+ unsigned int __unused1;
+ __kernel_time_t32 shm_atime;
+ unsigned int __unused2;
+ __kernel_time_t32 shm_dtime;
+ unsigned int __unused3;
+ __kernel_time_t32 shm_ctime;
+ unsigned int __unused4;
+ __kernel_size_t32 shm_segsz;
+ __kernel_pid_t32 shm_cpid;
+ __kernel_pid_t32 shm_lpid;
+ unsigned int shm_nattch;
+ unsigned int __unused5;
+ unsigned int __unused6;
+};
+
+/*
+ * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit
+ * emulation..
+ *
+ * This is really horribly ugly.
+ */
+/* 32-bit semctl: the fourth (semun) argument arrives as a u32 slot at
+ * *uptr — either an immediate value (SETVAL) or a 32-bit user pointer.
+ * STAT/SET commands bounce through a kernel-side semid{,64}_ds under
+ * KERNEL_DS and translate to/from the matching 32-bit layout chosen by
+ * the IPC_64 flag in "third". */
+static long do_sys32_semctl(int first, int second, int third, void *uptr)
+{
+ union semun fourth;
+ u32 pad;
+ int err, err2;
+ mm_segment_t old_fs;
+
+ if (!uptr)
+ return -EINVAL;
+ err = -EFAULT;
+ if (get_user(pad, (u32 *)uptr))
+ return err;
+ if (third == SETVAL)
+ fourth.val = (int)pad;
+ else
+ fourth.__pad = (void *)A(pad);
+ switch (third & (~IPC_64)) {
+
+ /* These commands take no struct translation: pass straight through. */
+ case IPC_INFO:
+ case IPC_RMID:
+ case SEM_INFO:
+ case GETVAL:
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ case GETALL:
+ case SETALL:
+ case SETVAL:
+ err = sys_semctl(first, second, third, fourth);
+ break;
+
+ case IPC_STAT:
+ case SEM_STAT:
+ if (third & IPC_64) {
+ struct semid64_ds s64;
+ struct semid64_ds32 *usp;
+
+ usp = (struct semid64_ds32 *)A(pad);
+ fourth.__pad = &s64;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_semctl(first, second, third, fourth);
+ set_fs(old_fs);
+ err2 = copy_to_user(&usp->sem_perm, &s64.sem_perm,
+ sizeof(struct ipc64_perm));
+ err2 |= __put_user(s64.sem_otime, &usp->sem_otime);
+ err2 |= __put_user(s64.sem_ctime, &usp->sem_ctime);
+ err2 |= __put_user(s64.sem_nsems, &usp->sem_nsems);
+ if (err2)
+ err = -EFAULT;
+ } else {
+ struct semid_ds s;
+ struct semid_ds32 *usp;
+
+ usp = (struct semid_ds32 *)A(pad);
+ fourth.__pad = &s;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_semctl(first, second, third, fourth);
+ set_fs(old_fs);
+ err2 = copy_to_user(&usp->sem_perm, &s.sem_perm,
+ sizeof(struct ipc_perm));
+ err2 |= __put_user(s.sem_otime, &usp->sem_otime);
+ err2 |= __put_user(s.sem_ctime, &usp->sem_ctime);
+ err2 |= __put_user(s.sem_nsems, &usp->sem_nsems);
+ if (err2)
+ err = -EFAULT;
+ }
+ break;
+
+ case IPC_SET:
+ /* Only uid/gid/mode are settable; fetch just those. */
+ if (third & IPC_64) {
+ struct semid64_ds s64;
+ struct semid64_ds32 *usp;
+
+ usp = (struct semid64_ds32 *)A(pad);
+
+ err = get_user(s64.sem_perm.uid, &usp->sem_perm.uid);
+ err |= __get_user(s64.sem_perm.gid,
+ &usp->sem_perm.gid);
+ err |= __get_user(s64.sem_perm.mode,
+ &usp->sem_perm.mode);
+ if (err)
+ goto out;
+ fourth.__pad = &s64;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_semctl(first, second, third, fourth);
+ set_fs(old_fs);
+
+ } else {
+ struct semid_ds s;
+ struct semid_ds32 *usp;
+
+ usp = (struct semid_ds32 *)A(pad);
+
+ err = get_user(s.sem_perm.uid, &usp->sem_perm.uid);
+ err |= __get_user(s.sem_perm.gid,
+ &usp->sem_perm.gid);
+ err |= __get_user(s.sem_perm.mode,
+ &usp->sem_perm.mode);
+ if (err)
+ goto out;
+ fourth.__pad = &s;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_semctl(first, second, third, fourth);
+ set_fs(old_fs);
+ }
+ break;
+ }
+out:
+ return err;
+}
+
+/* 32-bit msgsnd: copy the 32-bit msgbuf (s32 mtype + payload) into a
+ * kernel msgbuf, then call sys_msgsnd under KERNEL_DS.  "second" is the
+ * payload length; the +4 slack mirrors the mtype width difference. */
+static int
+do_sys32_msgsnd(int first, int second, int third, void *uptr)
+{
+ struct msgbuf *p;
+ struct msgbuf32 *up = (struct msgbuf32 *)uptr;
+ mm_segment_t old_fs;
+ int err;
+
+ if (second < 0)
+ return -EINVAL;
+
+ p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER);
+ if (!p)
+ return -ENOMEM;
+ err = get_user(p->mtype, &up->mtype);
+ err |= __copy_from_user(p->mtext, &up->mtext, second);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_msgsnd(first, p, second, third);
+ set_fs(old_fs);
+out:
+ kfree(p);
+ return err;
+}
+
+/* 32-bit msgrcv: for the old (version 0) calling convention the real
+ * buffer pointer and msgtyp are packed in an ipc_kludge that uptr
+ * points at.  Receive into a kernel msgbuf under KERNEL_DS, then copy
+ * mtype and the received bytes back in 32-bit msgbuf layout.
+ * On success sys_msgrcv's return (bytes received) is passed through. */
+static int
+do_sys32_msgrcv(int first, int second, int msgtyp, int third,
+ int version, void *uptr)
+{
+ struct msgbuf32 *up;
+ struct msgbuf *p;
+ mm_segment_t old_fs;
+ int err;
+
+ if (second < 0)
+ return -EINVAL;
+
+ if (!version) {
+ struct ipc_kludge *uipck = (struct ipc_kludge *)uptr;
+ struct ipc_kludge ipck;
+
+ err = -EINVAL;
+ if (!uptr)
+ goto out;
+ err = -EFAULT;
+ if (copy_from_user(&ipck, uipck, sizeof(struct ipc_kludge)))
+ goto out;
+ uptr = (void *)A(ipck.msgp);
+ msgtyp = ipck.msgtyp;
+ }
+ err = -ENOMEM;
+ p = kmalloc(second + sizeof (struct msgbuf) + 4, GFP_USER);
+ if (!p)
+ goto out;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_msgrcv(first, p, second + 4, msgtyp, third);
+ set_fs(old_fs);
+ if (err < 0)
+ goto free_then_out;
+ up = (struct msgbuf32 *)uptr;
+ if (put_user(p->mtype, &up->mtype) ||
+ __copy_to_user(&up->mtext, p->mtext, err))
+ err = -EFAULT;
+free_then_out:
+ kfree(p);
+out:
+ return err;
+}
+
+/* 32-bit msgctl: IPC_SET/IPC_STAT/MSG_STAT bounce through a kernel-side
+ * msqid{,64}_ds under KERNEL_DS and translate against the 32-bit layout
+ * selected by the IPC_64 flag in "second"; the remaining commands pass
+ * straight through.
+ * Fix: the non-IPC_64 STAT branch declared a struct msqid64_ds but the
+ * kernel fills the old struct msqid_ds layout there, so every field was
+ * read back at the wrong offset; it now uses struct msqid_ds, matching
+ * the IPC_SET branch. */
+static int
+do_sys32_msgctl(int first, int second, void *uptr)
+{
+ int err = -EINVAL, err2;
+ mm_segment_t old_fs;
+
+ switch (second & (~IPC_64)) {
+
+ case IPC_INFO:
+ case IPC_RMID:
+ case MSG_INFO:
+ err = sys_msgctl(first, second, (struct msqid_ds *)uptr);
+ break;
+
+ case IPC_SET:
+ /* Only perms and msg_qbytes are settable; fetch just those. */
+ if (second & IPC_64) {
+ struct msqid64_ds m64;
+ struct msqid64_ds32 *up = (struct msqid64_ds32 *)uptr;
+
+ err2 = copy_from_user(&m64.msg_perm, &up->msg_perm,
+ sizeof(struct ipc64_perm));
+ err2 |= __get_user(m64.msg_qbytes, &up->msg_qbytes);
+ if (err2) {
+ err = -EFAULT;
+ break;
+ }
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_msgctl(first, second,
+ (struct msqid_ds *)&m64);
+ set_fs(old_fs);
+ } else {
+ struct msqid_ds m;
+ struct msqid_ds32 *up = (struct msqid_ds32 *)uptr;
+
+ err2 = copy_from_user(&m.msg_perm, &up->msg_perm,
+ sizeof(struct ipc_perm));
+ err2 |= __get_user(m.msg_qbytes, &up->msg_qbytes);
+ if (err2) {
+ err = -EFAULT;
+ break;
+ }
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_msgctl(first, second, &m);
+ set_fs(old_fs);
+ }
+ break;
+
+ case IPC_STAT:
+ case MSG_STAT:
+ if (second & IPC_64) {
+ struct msqid64_ds m64;
+ struct msqid64_ds32 *up = (struct msqid64_ds32 *)uptr;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_msgctl(first, second,
+ (struct msqid_ds *)&m64);
+ set_fs(old_fs);
+
+ err2 = copy_to_user(&up->msg_perm, &m64.msg_perm,
+ sizeof(struct ipc64_perm));
+ err2 |= __put_user(m64.msg_stime, &up->msg_stime);
+ err2 |= __put_user(m64.msg_rtime, &up->msg_rtime);
+ err2 |= __put_user(m64.msg_ctime, &up->msg_ctime);
+ err2 |= __put_user(m64.msg_cbytes, &up->msg_cbytes);
+ err2 |= __put_user(m64.msg_qnum, &up->msg_qnum);
+ err2 |= __put_user(m64.msg_qbytes, &up->msg_qbytes);
+ err2 |= __put_user(m64.msg_lspid, &up->msg_lspid);
+ err2 |= __put_user(m64.msg_lrpid, &up->msg_lrpid);
+ if (err2)
+ err = -EFAULT;
+ } else {
+ /* Old-style command: the kernel fills a struct
+ * msqid_ds here (was wrongly msqid64_ds). */
+ struct msqid_ds m;
+ struct msqid_ds32 *up = (struct msqid_ds32 *)uptr;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_msgctl(first, second, &m);
+ set_fs(old_fs);
+
+ err2 = copy_to_user(&up->msg_perm, &m.msg_perm,
+ sizeof(struct ipc_perm));
+ err2 |= __put_user(m.msg_stime, &up->msg_stime);
+ err2 |= __put_user(m.msg_rtime, &up->msg_rtime);
+ err2 |= __put_user(m.msg_ctime, &up->msg_ctime);
+ err2 |= __put_user(m.msg_cbytes, &up->msg_cbytes);
+ err2 |= __put_user(m.msg_qnum, &up->msg_qnum);
+ err2 |= __put_user(m.msg_qbytes, &up->msg_qbytes);
+ err2 |= __put_user(m.msg_lspid, &up->msg_lspid);
+ err2 |= __put_user(m.msg_lrpid, &up->msg_lrpid);
+ if (err2)
+ err = -EFAULT;
+ }
+ break;
+ }
+ return err;
+}
+
+/*
+ * 32-bit compat wrapper for shmat().  "third" carries the 32-bit user
+ * address where the attach address must be stored; the old version-1
+ * (iBCS-style) interface is rejected.
+ */
+static int
+do_sys32_shmat(int first, int second, int third, int version, void *uptr)
+{
+	u32 *result_ptr = (u32 *)A((u32)third);
+	unsigned long raddr;
+	int ret;
+
+	if (version == 1)
+		return -EINVAL;
+	ret = sys_shmat(first, uptr, second, &raddr);
+	if (ret == 0)
+		ret = put_user(raddr, result_ptr);
+	return ret;
+}
+
+/*
+ * 32-bit compat wrapper for sys_shmctl().  The IPC_64 bit in "second"
+ * selects between the shmid_ds32 and shmid64_ds32 user layouts; the
+ * conversions run around the real syscall under set_fs(KERNEL_DS).
+ */
+static int
+do_sys32_shmctl(int first, int second, void *uptr)
+{
+	int err = -EFAULT, err2;
+	mm_segment_t old_fs;
+
+	switch (second & (~IPC_64)) {
+
+	case IPC_INFO:
+	case IPC_RMID:
+	case SHM_LOCK:
+	case SHM_UNLOCK:
+		/* No structure conversion needed; pass straight through. */
+		err = sys_shmctl(first, second, (struct shmid_ds *)uptr);
+		break;
+	case IPC_SET:
+		if (second & IPC_64) {
+			struct shmid64_ds32 *up = (struct shmid64_ds32 *)uptr;
+			struct shmid64_ds s64;
+
+			/* Only uid, gid and mode are copied from user space. */
+			err = get_user(s64.shm_perm.uid, &up->shm_perm.uid);
+			err |= __get_user(s64.shm_perm.gid, &up->shm_perm.gid);
+			err |= __get_user(s64.shm_perm.mode,
+					  &up->shm_perm.mode);
+			if (err)
+				break;
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			err = sys_shmctl(first, second,
+					 (struct shmid_ds *)&s64);
+			set_fs(old_fs);
+		} else {
+			struct shmid_ds32 *up = (struct shmid_ds32 *)uptr;
+			struct shmid_ds s;
+
+			err = get_user(s.shm_perm.uid, &up->shm_perm.uid);
+			err |= __get_user(s.shm_perm.gid, &up->shm_perm.gid);
+			err |= __get_user(s.shm_perm.mode, &up->shm_perm.mode);
+			if (err)
+				break;
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			err = sys_shmctl(first, second, &s);
+			set_fs(old_fs);
+		}
+		break;
+
+	case IPC_STAT:
+	case SHM_STAT:
+		if (second & IPC_64) {
+			struct shmid64_ds32 *up = (struct shmid64_ds32 *)uptr;
+			struct shmid64_ds s64;
+
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			err = sys_shmctl(first, second,
+					 (struct shmid_ds *)&s64);
+			set_fs(old_fs);
+			if (err < 0)
+				break;
+
+			/* Copy the result out field by field; any fault
+			 * turns the whole call into -EFAULT. */
+			err2 = copy_to_user(&up->shm_perm, &s64.shm_perm,
+					    sizeof(struct ipc64_perm));
+			err2 |= __put_user(s64.shm_atime, &up->shm_atime);
+			err2 |= __put_user(s64.shm_dtime, &up->shm_dtime);
+			err2 |= __put_user(s64.shm_ctime, &up->shm_ctime);
+			err2 |= __put_user(s64.shm_segsz, &up->shm_segsz);
+			err2 |= __put_user(s64.shm_nattch, &up->shm_nattch);
+			err2 |= __put_user(s64.shm_cpid, &up->shm_cpid);
+			err2 |= __put_user(s64.shm_lpid, &up->shm_lpid);
+			if (err2)
+				err = -EFAULT;
+		} else {
+			struct shmid_ds32 *up = (struct shmid_ds32 *)uptr;
+			struct shmid_ds s;
+
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			err = sys_shmctl(first, second, &s);
+			set_fs(old_fs);
+			if (err < 0)
+				break;
+
+			err2 = copy_to_user(&up->shm_perm, &s.shm_perm,
+					    sizeof(struct ipc_perm));
+			err2 |= __put_user (s.shm_atime, &up->shm_atime);
+			err2 |= __put_user (s.shm_dtime, &up->shm_dtime);
+			err2 |= __put_user (s.shm_ctime, &up->shm_ctime);
+			err2 |= __put_user (s.shm_segsz, &up->shm_segsz);
+			err2 |= __put_user (s.shm_nattch, &up->shm_nattch);
+			err2 |= __put_user (s.shm_cpid, &up->shm_cpid);
+			err2 |= __put_user (s.shm_lpid, &up->shm_lpid);
+			if (err2)
+				err = -EFAULT;
+		}
+		break;
+
+	case SHM_INFO: {
+		struct shm_info si;
+		/* 32-bit layout of struct shm_info, declared locally as
+		 * it is only needed for this one conversion. */
+		struct shm_info32 {
+			int used_ids;
+			u32 shm_tot, shm_rss, shm_swp;
+			u32 swap_attempts, swap_successes;
+		} *uip = (struct shm_info32 *)uptr;
+
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = sys_shmctl(first, second, (struct shmid_ds *)&si);
+		set_fs(old_fs);
+		if (err < 0)
+			break;
+		err2 = put_user(si.used_ids, &uip->used_ids);
+		err2 |= __put_user(si.shm_tot, &uip->shm_tot);
+		err2 |= __put_user(si.shm_rss, &uip->shm_rss);
+		err2 |= __put_user(si.shm_swp, &uip->shm_swp);
+		err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
+		err2 |= __put_user(si.swap_successes, &uip->swap_successes);
+		if (err2)
+			err = -EFAULT;
+		break;
+	}
+	}
+	return err;
+}
+
+/*
+ * Note: first_parm, second_parm and third_parm must be taken as
+ * unsigned ints and then explicitly cast to signed int.  This
+ * guarantees the proper conversion (sign extension) between the
+ * register representation of a signed int in 32-bit mode and its
+ * register representation in 64-bit mode.
+ */
+/*
+ * 32-bit ipc() demultiplexer: dispatches the shared ipc syscall entry
+ * to the sem/msg/shm compat handlers.  The top 16 bits of "call"
+ * carry the interface version; the low 16 bits select the operation.
+ */
+asmlinkage long sys32_ipc(u32 call, u32 first_parm, u32 second_parm, u32 third_parm, u32 ptr, u32 fifth)
+{
+	/* Sign-extend the 32-bit register values (see note above). */
+	int first = (int)first_parm;
+	int second = (int)second_parm;
+	int third = (int)third_parm;
+	int version, err;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_ipc - entered - call=%x, parm1=%x, parm2=%x, parm3=%x, parm4=%x, parm5=%x \n",
+	       call, first_parm, second_parm, third_parm, ptr, fifth);
+
+	version = call >> 16; /* hack for backward compatibility */
+	call &= 0xffff;
+
+	switch (call) {
+
+	case SEMOP:
+		/* struct sembuf is the same on 32 and 64bit :)) */
+		err = sys_semop(first, (struct sembuf *)AA(ptr),
+				second);
+		break;
+	case SEMGET:
+		err = sys_semget(first, second, third);
+		break;
+	case SEMCTL:
+		err = do_sys32_semctl(first, second, third,
+				      (void *)AA(ptr));
+		break;
+
+	case MSGSND:
+		err = do_sys32_msgsnd(first, second, third,
+				      (void *)AA(ptr));
+		break;
+	case MSGRCV:
+		/* Note the argument order: fifth is msgtyp, third is
+		 * the flags word, as required by do_sys32_msgrcv(). */
+		err = do_sys32_msgrcv(first, second, fifth, third,
+				      version, (void *)AA(ptr));
+		break;
+	case MSGGET:
+		err = sys_msgget((key_t)first, second);
+		break;
+	case MSGCTL:
+		err = do_sys32_msgctl(first, second, (void *)AA(ptr));
+		break;
+
+	case SHMAT:
+		err = do_sys32_shmat(first, second, third,
+				     version, (void *)AA(ptr));
+		break;
+	case SHMDT:
+		err = sys_shmdt((char *)AA(ptr));
+		break;
+	case SHMGET:
+		err = sys_shmget(first, second, third);
+		break;
+	case SHMCTL:
+		err = do_sys32_shmctl(first, second, (void *)AA(ptr));
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+
+	PPCDBG(PPCDBG_SYS32, "sys32_ipc - exited w/ %d/0x%x \n", err, err);
+	return err;
+}
+
+/* stat syscall methods. */
+extern asmlinkage int sys_stat(char* filename, struct __old_kernel_stat* statbuf);
+
+/*
+ * Fill a 32-bit __old_kernel_stat from an inode and copy it out to
+ * user space.  Warns (at most five times per boot) that the calling
+ * binary still uses the obsolete stat() interface.
+ */
+static int cp_old_stat32(struct inode* inode, struct __old_kernel_stat32* statbuf)
+{
+	static int warncount = 5;
+	struct __old_kernel_stat32 buf;
+
+	if (warncount) {
+		warncount--;
+		printk("VFS: Warning: %s using old stat() call. Recompile your binary.\n",
+		       current->comm);
+	}
+
+	buf.st_dev   = kdev_t_to_nr(inode->i_dev);
+	buf.st_ino   = inode->i_ino;
+	buf.st_mode  = inode->i_mode;
+	buf.st_nlink = inode->i_nlink;
+	SET_OLDSTAT_UID(buf, inode->i_uid);
+	SET_OLDSTAT_GID(buf, inode->i_gid);
+	buf.st_rdev  = kdev_t_to_nr(inode->i_rdev);
+	buf.st_size  = inode->i_size;
+	buf.st_atime = inode->i_atime;
+	buf.st_mtime = inode->i_mtime;
+	buf.st_ctime = inode->i_ctime;
+
+	if (copy_to_user(statbuf, &buf, sizeof(buf)))
+		return -EFAULT;
+	return 0;
+}
+
+/* 32-bit version of the obsolete stat() syscall. */
+asmlinkage long sys32_stat(char* filename, struct __old_kernel_stat32* statbuf)
+{
+	struct nameidata nd;
+	int ret;
+
+	PPCDBG(PPCDBG_SYS32X, "sys32_stat - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	ret = user_path_walk(filename, &nd);
+	if (!ret) {
+		ret = do_revalidate(nd.dentry);
+		if (!ret)
+			ret = cp_old_stat32(nd.dentry->d_inode, statbuf);
+		path_release(&nd);
+	}
+
+	PPCDBG(PPCDBG_SYS32X, "sys32_stat - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	return ret;
+}
+
+/* 32-bit version of the obsolete fstat() syscall. */
+asmlinkage long sys32_fstat(unsigned int fd, struct __old_kernel_stat32* statbuf)
+{
+	int ret = -EBADF;
+	struct file *file;
+
+	PPCDBG(PPCDBG_SYS32X, "sys32_fstat - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	file = fget(fd);
+	if (file) {
+		struct dentry *dentry = file->f_dentry;
+
+		ret = do_revalidate(dentry);
+		if (!ret)
+			ret = cp_old_stat32(dentry->d_inode, statbuf);
+		fput(file);
+	}
+
+	PPCDBG(PPCDBG_SYS32X, "sys32_fstat - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	return ret;
+}
+
+/* 32-bit version of the obsolete lstat() syscall (no symlink follow). */
+asmlinkage long sys32_lstat(char* filename, struct __old_kernel_stat32* statbuf)
+{
+	struct nameidata nd;
+	int ret;
+
+	PPCDBG(PPCDBG_SYS32X, "sys32_lstat - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	ret = user_path_walk_link(filename, &nd);
+	if (!ret) {
+		ret = do_revalidate(nd.dentry);
+		if (!ret)
+			ret = cp_old_stat32(nd.dentry->d_inode, statbuf);
+		path_release(&nd);
+	}
+
+	PPCDBG(PPCDBG_SYS32X, "sys32_lstat - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	return ret;
+}
+
+extern asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t* offset, size_t count);
+
+/* Note: out_fd and in_fd must be taken as unsigned ints, with the
+ * corresponding cast to a signed int, to ensure the proper conversion
+ * (sign extension) between the 32-bit and 64-bit register
+ * representations of a signed int.
+ */
+/*
+ * 32-bit sendfile(): the 32-bit off_t is staged through a kernel
+ * off_t (under KERNEL_DS) so sys_sendfile() can update it, then the
+ * possibly-advanced offset is copied back out to user space.
+ */
+asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, __kernel_off_t32* offset, u32 count)
+{
+	mm_segment_t old_fs = get_fs();
+	off_t kernel_off;
+	int ret;
+
+	if (offset && get_user(kernel_off, offset))
+		return -EFAULT;
+
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile((int)out_fd, (int)in_fd,
+			   offset ? &kernel_off : NULL, count);
+	set_fs(old_fs);
+
+	if (offset && put_user(kernel_off, offset))
+		return -EFAULT;
+
+	return ret;
+}
+
+extern asmlinkage int sys_setsockopt(int fd, int level, int optname, char *optval, int optlen);
+
+/*
+ * 32-bit setsockopt().  Only SO_ATTACH_FILTER needs conversion: the
+ * 32-bit sock_fprog carries a 32-bit pointer to the filter array,
+ * which is copied into a kernel buffer and re-wrapped in a native
+ * sock_fprog before calling sys_setsockopt() under KERNEL_DS.  All
+ * other options pass through unchanged.
+ */
+asmlinkage long sys32_setsockopt(int fd, int level, int optname, char* optval, int optlen)
+{
+
+	PPCDBG(PPCDBG_SYS32,"sys32_setsockopt - running - pid=%ld, comm=%s\n", current->pid, current->comm);
+
+	if (optname == SO_ATTACH_FILTER) {
+		/* 32-bit layout of struct sock_fprog. */
+		struct sock_fprog32 {
+			__u16 len;
+			__u32 filter;	/* 32-bit user pointer to sock_filter[] */
+		} *fprog32 = (struct sock_fprog32 *)optval;
+		struct sock_fprog kfprog;
+		struct sock_filter *kfilter;
+		unsigned int fsize;
+		mm_segment_t old_fs;
+		__u32 uptr;
+		int ret;
+
+		if (get_user(kfprog.len, &fprog32->len) ||
+		    __get_user(uptr, &fprog32->filter))
+			return -EFAULT;
+		kfprog.filter = (struct sock_filter *)A(uptr);
+		/* len is __u16, so this multiply cannot overflow. */
+		fsize = kfprog.len * sizeof(struct sock_filter);
+		kfilter = (struct sock_filter *)kmalloc(fsize, GFP_KERNEL);
+		if (kfilter == NULL)
+			return -ENOMEM;
+		if (copy_from_user(kfilter, kfprog.filter, fsize)) {
+			kfree(kfilter);
+			return -EFAULT;
+		}
+		/* Point the kernel sock_fprog at the kernel-side copy. */
+		kfprog.filter = kfilter;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		ret = sys_setsockopt(fd, level, optname,
+				     (char *)&kfprog, sizeof(kfprog));
+		set_fs(old_fs);
+		kfree(kfilter);
+		return ret;
+	}
+	return sys_setsockopt(fd, level, optname, optval, optlen);
+}
+
+
+
+
+#define MAX_SOCK_ADDR 128 /* 108 for Unix domain - 16 for IP, 16 for IPX, 24 for IPv6, about 80 for AX.25 */
+/* 32-bit analogues of the CMSG_* helpers: alignment is to
+ * sizeof(int) (4 bytes) rather than the native 8-byte alignment. */
+#define __CMSG32_NXTHDR(ctl, len, cmsg, cmsglen) __cmsg32_nxthdr((ctl),(len),(cmsg),(cmsglen))
+#define CMSG32_NXTHDR(mhdr, cmsg, cmsglen) cmsg32_nxthdr((mhdr), (cmsg), (cmsglen))
+
+#define CMSG32_ALIGN(len) ( ((len)+sizeof(int)-1) & ~(sizeof(int)-1) )
+
+#define CMSG32_DATA(cmsg)	((void *)((char *)(cmsg) + CMSG32_ALIGN(sizeof(struct cmsghdr32))))
+#define CMSG32_SPACE(len) (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + CMSG32_ALIGN(len))
+#define CMSG32_LEN(len) (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + (len))
+#define __CMSG32_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr32) ? \
+				    (struct cmsghdr32 *)(ctl) : \
+				    (struct cmsghdr32 *)NULL)
+#define CMSG32_FIRSTHDR(msg)	__CMSG32_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)
+
+/* struct msghdr as seen by a 32-bit application: the three embedded
+ * pointers are 32-bit user addresses. */
+struct msghdr32
+{
+	u32		msg_name;	/* 32-bit user pointer */
+	int		msg_namelen;
+	u32		msg_iov;	/* 32-bit user pointer to iovec32[] */
+	__kernel_size_t32 msg_iovlen;
+	u32		msg_control;	/* 32-bit user pointer */
+	__kernel_size_t32 msg_controllen;
+	unsigned	msg_flags;
+};
+
+/* struct cmsghdr as seen by a 32-bit application. */
+struct cmsghdr32
+{
+	__kernel_size_t32 cmsg_len;
+	int		cmsg_level;
+	int		cmsg_type;
+};
+
+/*
+ * Step to the next 32-bit cmsg header inside a control buffer of
+ * __size bytes starting at __ctl.  Returns NULL when a complete
+ * cmsghdr32 would not fit within the buffer.
+ */
+__inline__ struct cmsghdr32 *__cmsg32_nxthdr(void *__ctl, __kernel_size_t __size,
+			       struct cmsghdr32 *__cmsg, int __cmsg_len)
+{
+	unsigned char *next;
+
+	next = ((unsigned char *)__cmsg) + CMSG32_ALIGN(__cmsg_len);
+	if ((unsigned long)((next + sizeof(struct cmsghdr32)) -
+			    (unsigned char *)__ctl) > __size)
+		return NULL;
+	return (struct cmsghdr32 *)next;
+}
+
+/* msghdr-based convenience wrapper around __cmsg32_nxthdr(). */
+__inline__ struct cmsghdr32 *cmsg32_nxthdr (struct msghdr *__msg,
+					    struct cmsghdr32 *__cmsg,
+					    int __cmsg_len)
+{
+	return __cmsg32_nxthdr(__msg->msg_control, __msg->msg_controllen,
+			       __cmsg, __cmsg_len);
+}
+
+extern struct socket *sockfd_lookup(int fd, int *err);
+
+/* Drop the file reference taken by sockfd_lookup(). */
+extern __inline__ void sockfd_put(struct socket *sock)
+{
+	fput(sock->file);
+}
+
+/*
+ * Copy a 32-bit msghdr from user space into a native kernel msghdr,
+ * widening the three embedded 32-bit user pointers.
+ */
+static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg, struct msghdr32 *umsg)
+{
+	u32 uname, uiov, uctl;
+	int ret;
+
+	/* Fetch the three 32-bit pointers first ... */
+	ret = get_user(uname, &umsg->msg_name);
+	ret |= __get_user(uiov, &umsg->msg_iov);
+	ret |= __get_user(uctl, &umsg->msg_control);
+	if (ret)
+		return -EFAULT;
+
+	/* ... and widen them into the kernel msghdr. */
+	kmsg->msg_name = (void *)A(uname);
+	kmsg->msg_iov = (struct iovec *)A(uiov);
+	kmsg->msg_control = (void *)A(uctl);
+
+	/* The scalar fields copy over directly. */
+	ret = get_user(kmsg->msg_namelen, &umsg->msg_namelen);
+	ret |= get_user(kmsg->msg_iovlen, &umsg->msg_iovlen);
+	ret |= get_user(kmsg->msg_controllen, &umsg->msg_controllen);
+	ret |= get_user(kmsg->msg_flags, &umsg->msg_flags);
+
+	return ret;
+}
+
+/*
+ * Convert an array of 32-bit iovecs from user space into the kernel
+ * iovec array kiov.  Returns the total byte count of all segments, or
+ * -EFAULT if any entry could not be read.
+ */
+static inline int iov_from_user32_to_kern(struct iovec *kiov,
+					  struct iovec32 *uiov32,
+					  int niov)
+{
+	int total = 0;
+	int i;
+
+	for (i = 0; i < niov; i++, uiov32++, kiov++) {
+		u32 seg_len, seg_base;
+
+		if (get_user(seg_len, &uiov32->iov_len) ||
+		    get_user(seg_base, &uiov32->iov_base))
+			return -EFAULT;
+		total += seg_len;
+		kiov->iov_base = (void *)A(seg_base);
+		kiov->iov_len = (__kernel_size_t)seg_len;
+	}
+	return total;
+}
+
+/* I've named the args so it is easy to tell whose space the pointers are in. */
+/*
+ * Validate and convert the 32-bit iovec array attached to kern_msg.
+ * With mode == VERIFY_READ (sendmsg) the socket address is also
+ * copied into kern_address.  Returns the total data length, or a
+ * negative errno.  On success kern_msg->msg_iov points either at the
+ * caller's kern_iov array or at a kmalloc'd one (when there are more
+ * than UIO_FASTIOV entries) which the caller must kfree.
+ */
+static int verify_iovec32(struct msghdr *kern_msg, struct iovec *kern_iov,
+			  char *kern_address, int mode)
+{
+	int tot_len;
+
+	if(kern_msg->msg_namelen) {
+		if(mode==VERIFY_READ) {
+			int err = move_addr_to_kernel(kern_msg->msg_name,
+						      kern_msg->msg_namelen,
+						      kern_address);
+			if(err < 0)
+				return err;
+		}
+		kern_msg->msg_name = kern_address;
+	} else
+		kern_msg->msg_name = NULL;
+
+	/* Too many segments for the on-stack fast array: allocate. */
+	if(kern_msg->msg_iovlen > UIO_FASTIOV) {
+		kern_iov = kmalloc(kern_msg->msg_iovlen * sizeof(struct iovec),
+				   GFP_KERNEL);
+		if(!kern_iov)
+			return -ENOMEM;
+	}
+
+	tot_len = iov_from_user32_to_kern(kern_iov,
+					  (struct iovec32 *)kern_msg->msg_iov,
+					  kern_msg->msg_iovlen);
+	if(tot_len >= 0)
+		kern_msg->msg_iov = kern_iov;
+	else if(kern_msg->msg_iovlen > UIO_FASTIOV)
+		kfree(kern_iov);	/* conversion failed; free our copy */
+
+	return tot_len;
+}
+
+/* There is a lot of hair here because the alignment rules (and
+ * thus placement) of cmsg headers and length are different for
+ * 32-bit apps. -DaveM
+ */
+/*
+ * Convert the 32-bit cmsg chain attached to kmsg into 64-bit format.
+ * Pass 1 validates each 32-bit header and totals the kernel-side
+ * size; pass 2 copies headers and payloads into stackbuf (or a
+ * kmalloc'd buffer when too large), which is then hooked into kmsg.
+ * The caller frees kmsg->msg_control afterwards when it is not
+ * stackbuf (see sys32_sendmsg).
+ */
+static int cmsghdr_from_user32_to_kern(struct msghdr *kmsg,
+				       unsigned char *stackbuf, int stackbuf_size)
+{
+	struct cmsghdr32 *ucmsg;
+	struct cmsghdr *kcmsg, *kcmsg_base;
+	__kernel_size_t32 ucmlen;
+	__kernel_size_t kcmlen, tmp;
+
+	kcmlen = 0;
+	kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
+	ucmsg = CMSG32_FIRSTHDR(kmsg);
+	while(ucmsg != NULL) {
+		if(get_user(ucmlen, &ucmsg->cmsg_len))
+			return -EFAULT;
+
+		/* Catch bogons. */
+		if(CMSG32_ALIGN(ucmlen) <
+		   CMSG32_ALIGN(sizeof(struct cmsghdr32)))
+			return -EINVAL;
+		if((unsigned long)(((char *)ucmsg - (char *)kmsg->msg_control)
+				   + ucmlen) > kmsg->msg_controllen)
+			return -EINVAL;
+
+		/* 64-bit size = payload size + 64-bit header size. */
+		tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) +
+		       CMSG_ALIGN(sizeof(struct cmsghdr)));
+		kcmlen += tmp;
+		ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen);
+	}
+	if (kcmlen == 0)
+		return -EINVAL;
+
+	/* The kcmlen holds the 64-bit version of the control length.
+	 * It may not be modified as we do not stick it into the kmsg
+	 * until we have successfully copied over all of the data
+	 * from the user.
+	 */
+	if (kcmlen > stackbuf_size)
+		kcmsg_base = kcmsg = kmalloc(kcmlen, GFP_KERNEL);
+	if (kcmsg == NULL)
+		return -ENOBUFS;
+
+	/* Now copy them over neatly. */
+	memset(kcmsg, 0, kcmlen);
+	ucmsg = CMSG32_FIRSTHDR(kmsg);
+	while (ucmsg != NULL) {
+		/* NOTE(review): __get_user results are ignored in this
+		 * second pass; pass 1 already read the same fields. */
+		__get_user(ucmlen, &ucmsg->cmsg_len);
+		tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) +
+		       CMSG_ALIGN(sizeof(struct cmsghdr)));
+		kcmsg->cmsg_len = tmp;
+		__get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level);
+		__get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type);
+
+		/* Copy over the data. */
+		if(copy_from_user(CMSG_DATA(kcmsg),
+				  CMSG32_DATA(ucmsg),
+				  (ucmlen - CMSG32_ALIGN(sizeof(*ucmsg)))))
+			goto out_free_efault;
+
+		/* Advance. */
+		kcmsg = (struct cmsghdr *)((char *)kcmsg + CMSG_ALIGN(tmp));
+		ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen);
+	}
+
+	/* Ok, looks like we made it. Hook it up and return success. */
+	kmsg->msg_control = kcmsg_base;
+	kmsg->msg_controllen = kcmlen;
+	return 0;
+
+out_free_efault:
+	if(kcmsg_base != (struct cmsghdr *)stackbuf)
+		kfree(kcmsg_base);
+	return -EFAULT;
+}
+
+/*
+ * 32-bit sendmsg(): converts the user msghdr32 / iovec32 / cmsghdr32
+ * structures to native form, then sends via sock_sendmsg().  iovec
+ * and control buffers larger than the on-stack fast arrays are
+ * kmalloc'd by the helpers and freed here.
+ */
+asmlinkage long sys32_sendmsg(int fd, struct msghdr32* user_msg, unsigned int user_flags)
+{
+	struct socket *sock;
+	char address[MAX_SOCK_ADDR];
+	struct iovec iov[UIO_FASTIOV];
+	unsigned char ctl[sizeof(struct cmsghdr) + 20];
+	unsigned char *ctl_buf = ctl;
+	struct msghdr kern_msg;
+	int err, total_len;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sendmsg - entered - fd=%x, user_msg@=%p, user_flags=%x \n", fd, user_msg, user_flags);
+
+	if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
+		return -EFAULT;
+	if(kern_msg.msg_iovlen > UIO_MAXIOV)
+		return -EINVAL;
+	err = verify_iovec32(&kern_msg, iov, address, VERIFY_READ);
+	if (err < 0)
+		goto out;
+	total_len = err;
+
+	if(kern_msg.msg_controllen) {
+		err = cmsghdr_from_user32_to_kern(&kern_msg, ctl, sizeof(ctl));
+		if(err)
+			goto out_freeiov;
+		/* May now point at a kmalloc'd buffer (freed below). */
+		ctl_buf = kern_msg.msg_control;
+	}
+	kern_msg.msg_flags = user_flags;
+
+	sock = sockfd_lookup(fd, &err);
+	if (sock != NULL) {
+		if (sock->file->f_flags & O_NONBLOCK)
+			kern_msg.msg_flags |= MSG_DONTWAIT;
+		err = sock_sendmsg(sock, &kern_msg, total_len);
+		sockfd_put(sock);
+	}
+
+	/* N.B. Use kfree here, as kern_msg.msg_controllen might change? */
+	if(ctl_buf != ctl)
+		kfree(ctl_buf);
+out_freeiov:
+	if(kern_msg.msg_iov != iov)
+		kfree(kern_msg.msg_iov);
+out:
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sendmsg - exited w/ %lx \n", err);
+	return err;
+}
+
+/*
+ * Append one control message, in 32-bit cmsg format, to the user
+ * buffer tracked by kmsg->msg_control / msg_controllen, advancing
+ * both.  When the buffer is too small the data is truncated and
+ * MSG_CTRUNC is set; copy_to_user failures are silently ignored.
+ */
+static void put_cmsg32(struct msghdr *kmsg, int level, int type,
+		       int len, void *data)
+{
+	struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control;
+	struct cmsghdr32 cmhdr;
+	int cmlen = CMSG32_LEN(len);
+
+	/* No room for even a header: flag truncation and bail. */
+	if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
+		kmsg->msg_flags |= MSG_CTRUNC;
+		return;
+	}
+
+	/* Partial room: truncate the payload. */
+	if (kmsg->msg_controllen < cmlen) {
+		kmsg->msg_flags |= MSG_CTRUNC;
+		cmlen = kmsg->msg_controllen;
+	}
+	cmhdr.cmsg_level = level;
+	cmhdr.cmsg_type = type;
+	cmhdr.cmsg_len = cmlen;
+
+	if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
+		return;
+	if (copy_to_user(CMSG32_DATA(cm), data, cmlen - sizeof(struct cmsghdr32)))
+		return;
+	/* Account for the aligned space actually consumed. */
+	cmlen = CMSG32_SPACE(len);
+	kmsg->msg_control += cmlen;
+	kmsg->msg_controllen -= cmlen;
+}
+
+
+/*
+ * 32-bit variant of scm_detach_fds(): install the received SCM_RIGHTS
+ * file descriptors into the current process and write them out as a
+ * 32-bit SCM_RIGHTS control message.  Sets MSG_CTRUNC when not all
+ * fds fit in the remaining control buffer.
+ */
+static void scm_detach_fds32(struct msghdr *kmsg, struct scm_cookie *scm)
+{
+	struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control;
+	int fdmax = (kmsg->msg_controllen - sizeof(struct cmsghdr32)) / sizeof(int);
+	int fdnum = scm->fp->count;
+	struct file **fp = scm->fp->fp;
+	int *cmfptr;
+	int err = 0, i;
+
+	if (fdnum < fdmax)
+		fdmax = fdnum;
+
+	for (i = 0, cmfptr = (int *) CMSG32_DATA(cm); i < fdmax; i++, cmfptr++) {
+		int new_fd;
+		err = get_unused_fd();
+		if (err < 0)
+			break;
+		new_fd = err;
+		err = put_user(new_fd, cmfptr);
+		if (err) {
+			put_unused_fd(new_fd);
+			break;
+		}
+		/* Bump the usage count and install the file. */
+		get_file(fp[i]);
+		fd_install(new_fd, fp[i]);
+	}
+
+	/* Write the cmsg header only if at least one fd was installed. */
+	if (i > 0) {
+		int cmlen = CMSG32_LEN(i * sizeof(int));
+		if (!err)
+			err = put_user(SOL_SOCKET, &cm->cmsg_level);
+		if (!err)
+			err = put_user(SCM_RIGHTS, &cm->cmsg_type);
+		if (!err)
+			err = put_user(cmlen, &cm->cmsg_len);
+		if (!err) {
+			cmlen = CMSG32_SPACE(i * sizeof(int));
+			kmsg->msg_control += cmlen;
+			kmsg->msg_controllen -= cmlen;
+		}
+	}
+	if (i < fdnum)
+		kmsg->msg_flags |= MSG_CTRUNC;
+
+	/*
+	 * All of the files that fit in the message have had their
+	 * usage counts incremented, so we just free the list.
+	 */
+	__scm_destroy(scm);
+}
+
+/* In these cases we (currently) can just copy to data over verbatim
+ * because all CMSGs created by the kernel have well defined types which
+ * have the same layout in both the 32-bit and 64-bit API. One must add
+ * some special cased conversions here if we start sending control messages
+ * with incompatible types.
+ *
+ * SCM_RIGHTS and SCM_CREDENTIALS are done by hand in recvmsg32 right after
+ * we do our work. The remaining cases are:
+ *
+ * SOL_IP IP_PKTINFO struct in_pktinfo 32-bit clean
+ * IP_TTL int 32-bit clean
+ * IP_TOS __u8 32-bit clean
+ * IP_RECVOPTS variable length 32-bit clean
+ * IP_RETOPTS variable length 32-bit clean
+ * (these last two are clean because the types are defined
+ * by the IPv4 protocol)
+ * IP_RECVERR struct sock_extended_err +
+ * struct sockaddr_in 32-bit clean
+ * SOL_IPV6 IPV6_RECVERR struct sock_extended_err +
+ * struct sockaddr_in6 32-bit clean
+ * IPV6_PKTINFO struct in6_pktinfo 32-bit clean
+ * IPV6_HOPLIMIT int 32-bit clean
+ * IPV6_FLOWINFO u32 32-bit clean
+ * IPV6_HOPOPTS ipv6 hop exthdr 32-bit clean
+ * IPV6_DSTOPTS ipv6 dst exthdr(s) 32-bit clean
+ * IPV6_RTHDR ipv6 routing exthdr 32-bit clean
+ * IPV6_AUTHHDR ipv6 auth exthdr 32-bit clean
+ */
+/*
+ * Rewrite, in place in the user buffer, the 64-bit format control
+ * messages that recvmsg processing wrote between orig_cmsg_uptr and
+ * kmsg->msg_control into 32-bit cmsg format, staging through a
+ * temporary kernel buffer.  SO_TIMESTAMP payloads are additionally
+ * narrowed from 64-bit to 32-bit time values.  On allocation failure
+ * all CMSGs are dropped (control pointer/length reset) rather than
+ * exposing 64-bit records to a 32-bit application.
+ */
+static void cmsg32_recvmsg_fixup(struct msghdr *kmsg, unsigned long orig_cmsg_uptr)
+{
+	unsigned char *workbuf, *wp;
+	unsigned long bufsz, space_avail;
+	struct cmsghdr *ucmsg;
+
+	/* Bytes of 64-bit cmsg data the kernel already wrote. */
+	bufsz = ((unsigned long)kmsg->msg_control) - orig_cmsg_uptr;
+	space_avail = kmsg->msg_controllen + bufsz;
+	wp = workbuf = kmalloc(bufsz, GFP_KERNEL);
+	if(workbuf == NULL)
+		goto fail;
+
+	/* To make this more sane we assume the kernel sends back properly
+	 * formatted control messages. Because of how the kernel will truncate
+	 * the cmsg_len for MSG_TRUNC cases, we need not check that case either.
+	 */
+	ucmsg = (struct cmsghdr *) orig_cmsg_uptr;
+	while(((unsigned long)ucmsg) <=
+	      (((unsigned long)kmsg->msg_control) - sizeof(struct cmsghdr))) {
+		struct cmsghdr32 *kcmsg32 = (struct cmsghdr32 *) wp;
+		int clen64, clen32;
+
+		/* UCMSG is the 64-bit format CMSG entry in user-space.
+		 * KCMSG32 is within the kernel space temporary buffer
+		 * we use to convert into a 32-bit style CMSG.
+		 */
+		/* NOTE(review): __get_user/copy_from_user results are
+		 * ignored here; the data was just written by the kernel. */
+		__get_user(kcmsg32->cmsg_len, &ucmsg->cmsg_len);
+		__get_user(kcmsg32->cmsg_level, &ucmsg->cmsg_level);
+		__get_user(kcmsg32->cmsg_type, &ucmsg->cmsg_type);
+
+		clen64 = kcmsg32->cmsg_len;
+		copy_from_user(CMSG32_DATA(kcmsg32), CMSG_DATA(ucmsg),
+			       clen64 - CMSG_ALIGN(sizeof(*ucmsg)));
+		clen32 = ((clen64 - CMSG_ALIGN(sizeof(*ucmsg))) +
+			  CMSG32_ALIGN(sizeof(struct cmsghdr32)));
+		kcmsg32->cmsg_len = clen32;
+
+		switch (kcmsg32->cmsg_type) {
+			/*
+			 * The timestamp type's data needs to be converted
+			 * from 64-bit time values to 32-bit time values
+			 */
+		case SO_TIMESTAMP: {
+			__kernel_time_t32* ptr_time32 = CMSG32_DATA(kcmsg32);
+			__kernel_time_t*   ptr_time   = CMSG_DATA(ucmsg);
+			*ptr_time32 = *ptr_time;
+			*(ptr_time32+1) = *(ptr_time+1);
+			kcmsg32->cmsg_len -= 2*(sizeof(__kernel_time_t) -
+						sizeof(__kernel_time_t32));
+		}
+		default:;
+		}
+
+		ucmsg = (struct cmsghdr *) (((char *)ucmsg) + CMSG_ALIGN(clen64));
+		wp = (((char *)kcmsg32) + CMSG32_ALIGN(kcmsg32->cmsg_len));
+	}
+
+	/* Copy back fixed up data, and adjust pointers. */
+	bufsz = (wp - workbuf);
+	copy_to_user((void *)orig_cmsg_uptr, workbuf, bufsz);
+
+	kmsg->msg_control = (struct cmsghdr *)
+		(((char *)orig_cmsg_uptr) + bufsz);
+	kmsg->msg_controllen = space_avail - bufsz;
+
+	kfree(workbuf);
+	return;
+
+fail:
+	/* If we leave the 64-bit format CMSG chunks in there,
+	 * the application could get confused and crash. So to
+	 * ensure greater recovery, we report no CMSGs.
+	 */
+	kmsg->msg_controllen += bufsz;
+	kmsg->msg_control = (void *) orig_cmsg_uptr;
+}
+
+/*
+ * 32-bit recvmsg(): converts the user msghdr32/iovec32 to native
+ * form, receives via sock->ops->recvmsg, then converts any control
+ * messages back to 32-bit layout and updates namelen / controllen /
+ * flags in the user msghdr32.  Returns the received byte count or a
+ * negative errno.
+ */
+asmlinkage long sys32_recvmsg(int fd, struct msghdr32* user_msg, unsigned int user_flags)
+{
+	struct iovec iovstack[UIO_FASTIOV];
+	struct msghdr kern_msg;
+	char addr[MAX_SOCK_ADDR];
+	struct socket *sock;
+	struct iovec *iov = iovstack;
+	struct sockaddr *uaddr;
+	int *uaddr_len;
+	unsigned long cmsg_ptr;
+	int err, total_len, len = 0;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_recvmsg - entered - fd=%x, user_msg@=%p, user_flags=%x \n", fd, user_msg, user_flags);
+
+	if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
+		return -EFAULT;
+	if(kern_msg.msg_iovlen > UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Remember the user-space name buffer before verify_iovec32
+	 * replaces msg_name with the kernel-side addr[] buffer. */
+	uaddr = kern_msg.msg_name;
+	uaddr_len = &user_msg->msg_namelen;
+	err = verify_iovec32(&kern_msg, iov, addr, VERIFY_WRITE);
+	if (err < 0)
+		goto out;
+	total_len = err;
+
+	/* Original start of the user control buffer, for the fixup. */
+	cmsg_ptr = (unsigned long) kern_msg.msg_control;
+	kern_msg.msg_flags = 0;
+
+	sock = sockfd_lookup(fd, &err);
+	if (sock != NULL) {
+		struct scm_cookie scm;
+
+		if (sock->file->f_flags & O_NONBLOCK)
+			user_flags |= MSG_DONTWAIT;
+		memset(&scm, 0, sizeof(scm));
+		err = sock->ops->recvmsg(sock, &kern_msg, total_len,
+					 user_flags, &scm);
+		if(err >= 0) {
+			len = err;
+			if(!kern_msg.msg_control) {
+				if(sock->passcred || scm.fp)
+					kern_msg.msg_flags |= MSG_CTRUNC;
+				if(scm.fp)
+					__scm_destroy(&scm);
+			} else {
+				/* If recvmsg processing itself placed some
+				 * control messages into user space, it's is
+				 * using 64-bit CMSG processing, so we need
+				 * to fix it up before we tack on more stuff.
+				 */
+				if((unsigned long) kern_msg.msg_control != cmsg_ptr)
+					cmsg32_recvmsg_fixup(&kern_msg, cmsg_ptr);
+
+				/* Wheee... */
+				if(sock->passcred)
+					put_cmsg32(&kern_msg,
+						   SOL_SOCKET, SCM_CREDENTIALS,
+						   sizeof(scm.creds), &scm.creds);
+				if(scm.fp != NULL)
+					scm_detach_fds32(&kern_msg, &scm);
+			}
+		}
+		sockfd_put(sock);
+	}
+
+	if (uaddr != NULL && err >= 0 && kern_msg.msg_namelen)
+		err = move_addr_to_user(addr, kern_msg.msg_namelen, uaddr, uaddr_len);
+	/* Report how many control bytes were actually delivered. */
+	if(cmsg_ptr != 0 && err >= 0) {
+		unsigned long ucmsg_ptr = ((unsigned long)kern_msg.msg_control);
+		__kernel_size_t32 uclen = (__kernel_size_t32) (ucmsg_ptr - cmsg_ptr);
+		err |= __put_user(uclen, &user_msg->msg_controllen);
+	}
+	if(err >= 0)
+		err = __put_user(kern_msg.msg_flags, &user_msg->msg_flags);
+	if(kern_msg.msg_iov != iov)
+		kfree(kern_msg.msg_iov);
+out:
+	if(err < 0)
+		return err;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_recvmsg - exited w/ %lx \n", len);
+	return len;
+}
+
+/*
+ * count32() counts the number of arguments/envelopes
+ */
+/*
+ * Count the entries of a NULL-terminated array of 32-bit user
+ * pointers (argv/envp).  Returns the count, -E2BIG when it exceeds
+ * "max", or the -EFAULT from get_user() on an unreadable slot.
+ */
+static int count32(u32 * argv, int max)
+{
+	int n = 0;
+
+	if (!argv)
+		return 0;
+
+	for (;;) {
+		u32 entry;
+		int err = get_user(entry, argv);
+
+		if (err)
+			return err;
+		if (!entry)
+			return n;
+		argv++;
+		if (++n > max)
+			return -E2BIG;
+	}
+}
+
+/*
+ * 'copy_string32()' copies argument/envelope strings from user
+ * memory to free pages in kernel mem. These are in a format ready
+ * to be put directly into the top of new user memory.
+ */
+/*
+ * Copy argc strings (32-bit user pointers in argv) onto the new
+ * image's argument pages, working backwards from bprm->p (which
+ * grows down).  Pages are allocated and kmap'd on demand; newly
+ * allocated pages are zero-padded around the copied string.
+ */
+static int copy_strings32(int argc, u32 * argv, struct linux_binprm *bprm)
+{
+	while (argc-- > 0) {
+		u32 str;
+		int len;
+		unsigned long pos;
+
+		/* len includes the terminating NUL (strnlen_user). */
+		if (get_user(str, argv + argc) ||
+		    !str ||
+		    !(len = strnlen_user((char *)A(str), bprm->p)))
+			return -EFAULT;
+
+		if (bprm->p < len)
+			return -E2BIG;
+
+		bprm->p -= len;
+
+		pos = bprm->p;
+		/* The string may straddle page boundaries; copy it
+		 * page fragment by page fragment. */
+		while (len) {
+			char *kaddr;
+			struct page *page;
+			int offset, bytes_to_copy, new, err;
+
+			offset = pos % PAGE_SIZE;
+			page = bprm->page[pos / PAGE_SIZE];
+			new = 0;
+			if (!page) {
+				page = alloc_page(GFP_USER);
+				bprm->page[pos / PAGE_SIZE] = page;
+				if (!page)
+					return -ENOMEM;
+				new = 1;
+			}
+			kaddr = (char *)kmap(page);
+
+			/* Zero the parts of a fresh page we don't fill. */
+			if (new && offset)
+				memset(kaddr, 0, offset);
+			bytes_to_copy = PAGE_SIZE - offset;
+			if (bytes_to_copy > len) {
+				bytes_to_copy = len;
+				if (new)
+					memset(kaddr+offset+len, 0,
+					       PAGE_SIZE-offset-len);
+			}
+
+			err = copy_from_user(kaddr + offset, (char *)A(str),
+					     bytes_to_copy);
+			flush_page_to_ram(page);
+			kunmap((unsigned long)kaddr);
+
+			if (err)
+				return -EFAULT;
+
+			pos += bytes_to_copy;
+			str += bytes_to_copy;
+			len -= bytes_to_copy;
+		}
+	}
+	return 0;
+}
+
+/*
+ * sys32_execve() executes a new program.
+ */
+/*
+ * 32-bit counterpart of do_execve(): argv and envp are arrays of
+ * 32-bit user pointers, so counting and copying go through count32()
+ * and copy_strings32().  Returns 0 (or the binary handler's result)
+ * on success, a negative errno on failure.
+ */
+static int do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
+{
+	struct linux_binprm bprm;
+	struct file * file;
+	int retval;
+	int i;
+
+	/* Arguments are copied downwards from the top of the pages. */
+	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+	memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
+
+	file = open_exec(filename);
+
+	retval = PTR_ERR(file);
+	if (IS_ERR(file))
+		return retval;
+
+	bprm.file = file;
+	bprm.filename = filename;
+	bprm.sh_bang = 0;
+	bprm.loader = 0;
+	bprm.exec = 0;
+	if ((bprm.argc = count32(argv, bprm.p / sizeof(u32))) < 0) {
+		allow_write_access(file);
+		fput(file);
+		return bprm.argc;
+	}
+	if ((bprm.envc = count32(envp, bprm.p / sizeof(u32))) < 0) {
+		allow_write_access(file);
+		fput(file);
+		/* Bug fix: this used to return bprm.argc, which is
+		 * >= 0 here, silently masking the envp error. */
+		return bprm.envc;
+	}
+
+	retval = prepare_binprm(&bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+	if (retval < 0)
+		goto out;
+
+	bprm.exec = bprm.p;
+	retval = copy_strings32(bprm.envc, envp, &bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = copy_strings32(bprm.argc, argv, &bprm);
+	if (retval < 0)
+		goto out;
+
+	retval = search_binary_handler(&bprm, regs);
+	if (retval >= 0)
+		/* execve success */
+		return retval;
+
+out:
+	/* Something went wrong, return the inode and free the argument pages*/
+	allow_write_access(bprm.file);
+	if (bprm.file)
+		fput(bprm.file);
+
+	for (i=0 ; i<MAX_ARG_PAGES ; i++)
+		if (bprm.page[i])
+			__free_page(bprm.page[i]);
+
+	return retval;
+}
+
+/*
+ * 32-bit execve syscall entry.  a0 is the user pointer to the
+ * filename; a1/a2 are the 32-bit argv/envp arrays; a3-a5 are unused.
+ * FP state is handed back (giveup_fpu) before the exec when MSR_FP
+ * is set in the saved regs.
+ */
+asmlinkage long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+			     unsigned long a3, unsigned long a4, unsigned long a5,
+			     struct pt_regs *regs)
+{
+	int error;
+	char * filename;
+
+	ifppcdebug(PPCDBG_SYS32) {
+		udbg_printf("sys32_execve - entered - pid=%ld, comm=%s \n", current->pid, current->comm);
+		//PPCDBG(PPCDBG_SYS32NI, "         a0=%lx, a1=%lx, a2=%lx, a3=%lx, a4=%lx, a5=%lx, regs=%p \n", a0, a1, a2, a3, a4, a5, regs);
+	}
+
+	filename = getname((char *) a0);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	if (regs->msr & MSR_FP)
+		giveup_fpu(current);
+
+	error = do_execve32(filename, (u32*) a1, (u32*) a2, regs);
+
+	/* On success, clear the single-step-after-exec flag. */
+	if (error == 0)
+		current->ptrace &= ~PT_DTRACE;
+	putname(filename);
+
+out:
+	ifppcdebug(PPCDBG_SYS32) {
+		udbg_printf("sys32_execve - exited - returning %x - pid=%ld \n", error, current->pid);
+		//udbg_printf("sys32_execve - at exit - regs->gpr[1]=%lx, gpr[3]=%lx, gpr[4]=%lx, gpr[5]=%lx, gpr[6]=%lx \n", regs->gpr[1], regs->gpr[3], regs->gpr[4], regs->gpr[5], regs->gpr[6]);
+	}
+	return error;
+}
+
+/*
+ * Set up a thread for executing a new (32-bit) program: clear the
+ * GPRs and the register block starting at ctr, point nip at the entry
+ * point, set the initial stack pointer and a 32-bit user MSR, and
+ * drop any lazy-FPU ownership.
+ */
+void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
+{
+	set_fs(USER_DS);
+	memset(regs->gpr, 0, sizeof(regs->gpr));
+	/* Fix: "&regs" had been corrupted into the "(R)" entity.  Clears
+	 * the four consecutive fields starting at ctr — NOTE(review):
+	 * confirm against the pt_regs layout which four those are. */
+	memset(&regs->ctr, 0, 4 * sizeof(regs->ctr));
+	regs->nip = nip;
+	regs->gpr[1] = sp;
+	regs->msr = MSR_USER32;
+	if (last_task_used_math == current)
+		last_task_used_math = 0;
+	current->thread.fpscr = 0;
+}
+
+extern asmlinkage int sys_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+
+/* Note: option must be taken as an unsigned int, with the
+ * corresponding cast to a signed int, to ensure the proper conversion
+ * (sign extension) between the 32-bit and 64-bit register
+ * representations of a signed int.
+ */
+/* 32-bit prctl(): sign-extend option, widen the remaining arguments. */
+asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+{
+	PPCDBG(PPCDBG_SYS32, "sys32_prctl - running - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	return sys_prctl((int)option, (unsigned long)arg2,
+			 (unsigned long)arg3, (unsigned long)arg4,
+			 (unsigned long)arg5);
+}
+
+extern asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval);
+
+/* Note: it is necessary to treat pid as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/*
+ * 32-bit sched_rr_get_interval(2).  The native syscall fills a 64-bit
+ * struct timespec, so it is run against a kernel-space buffer under
+ * KERNEL_DS and the fields are then copied one by one into the caller's
+ * 32-bit timespec32.
+ */
+asmlinkage int sys32_sched_rr_get_interval(u32 pid, struct timespec32 *interval)
+{
+	struct timespec t;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sched_rr_get_interval - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	/* Let the uaccess checks accept our kernel-space &t. */
+	set_fs (KERNEL_DS);
+	ret = sys_sched_rr_get_interval((int)pid, &t);
+	set_fs (old_fs);
+	/* put_user validates the user pointer; __put_user then writes the
+	 * adjacent field without rechecking.
+	 */
+	if (put_user (t.tv_sec, &interval->tv_sec) ||
+	    __put_user (t.tv_nsec, &interval->tv_nsec))
+		return -EFAULT;
+
+	PPCDBG(PPCDBG_SYS32, "sys32_sched_rr_get_interval - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	return ret;
+}
+
+extern asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn, unsigned long off,
+ unsigned long len, unsigned char *buf);
+
+/* 32-bit pciconfig_read shim: widen the scalar arguments and convert the
+ * 32-bit user buffer address with AA() before calling the native handler.
+ */
+asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
+{
+	unsigned char *buf = (unsigned char *)AA(ubuf);
+
+	PPCDBG(PPCDBG_SYS32, "sys32_pciconfig_read - running - pid=%ld current=%lx comm=%s\n", current->pid, current, current->comm);
+
+	return sys_pciconfig_read((unsigned long)bus, (unsigned long)dfn,
+				  (unsigned long)off, (unsigned long)len, buf);
+}
+
+
+
+
+extern asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn, unsigned long off,
+ unsigned long len, unsigned char *buf);
+
+/* 32-bit pciconfig_write shim: same widening as the read side, with the
+ * user buffer address converted via AA().
+ */
+asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
+{
+	unsigned char *buf = (unsigned char *)AA(ubuf);
+
+	PPCDBG(PPCDBG_SYS32, "sys32_pciconfig_write - running - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	return sys_pciconfig_write((unsigned long)bus, (unsigned long)dfn,
+				   (unsigned long)off, (unsigned long)len, buf);
+}
+
+extern asmlinkage int sys_newuname(struct new_utsname * name);
+
+/*
+ * uname(2) for ppc64: after the generic sys_newuname fills the struct, a
+ * task running with the 32-bit personality gets "ppc" reported as its
+ * machine name instead of the native value.
+ */
+asmlinkage int ppc64_newuname(struct new_utsname * name)
+{
+	int errno = sys_newuname(name);
+
+	if (current->personality == PER_LINUX32 && !errno) {
+		/* Copy exactly 8 NUL-padded bytes.  The previous literal
+		 * "ppc\0\0" is only 6 bytes long, so copying 8 from it read
+		 * past the end of the string constant.
+		 */
+		if (copy_to_user(name->machine, "ppc\0\0\0\0", 8)) {
+			errno = -EFAULT;
+		}
+	}
+	return errno;
+}
+
+extern asmlinkage long sys_personality(unsigned long);
+
+/*
+ * 32-bit personality shim.  A PER_LINUX32 task asking for PER_LINUX keeps
+ * PER_LINUX32 internally, and PER_LINUX32 is reported back to the 32-bit
+ * caller as plain PER_LINUX.
+ */
+asmlinkage int sys32_personality(unsigned long personality)
+{
+	unsigned long target = personality;
+	int ret;
+
+	if (current->personality == PER_LINUX32 && target == PER_LINUX)
+		target = PER_LINUX32;
+	ret = sys_personality(target);
+	if (ret == PER_LINUX32)
+		ret = PER_LINUX;
+	return ret;
+}
+
+
+
+extern asmlinkage long sys_access(const char * filename, int mode);
+
+/* Note: it is necessary to treat mode as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit access(2): mode is sign-extended from its 32-bit register image. */
+asmlinkage long sys32_access(const char * filename, u32 mode)
+{
+	int smode = (int)mode;
+
+	return sys_access(filename, smode);
+}
+
+
+extern asmlinkage int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6, struct pt_regs *regs);
+
+/* Note: it is necessary to treat p1, p2, p3, p4, p5, p7, and regs as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit clone(2): sign-extend all six register arguments and chain to the
+ * native implementation, which reads the rest from regs.
+ */
+asmlinkage int sys32_clone(u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, struct pt_regs *regs)
+{
+	return sys_clone((int)p1, (int)p2, (int)p3,
+			 (int)p4, (int)p5, (int)p6, regs);
+}
+
+
+extern asmlinkage long sys_creat(const char * pathname, int mode);
+
+/* Note: it is necessary to treat mode as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit creat(2): sign-extend the mode argument. */
+asmlinkage long sys32_creat(const char * pathname, u32 mode)
+{
+	int smode = (int)mode;
+
+	return sys_creat(pathname, smode);
+}
+
+
+extern asmlinkage long sys_exit(int error_code);
+
+/* Note: it is necessary to treat error_code as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit exit(2): sign-extend the exit code for the native handler. */
+asmlinkage long sys32_exit(u32 error_code)
+{
+	int code = (int)error_code;
+
+	return sys_exit(code);
+}
+
+
+extern asmlinkage long sys_wait4(pid_t pid, unsigned int * stat_addr, int options, struct rusage * ru);
+
+/* Note: it is necessary to treat pid and options as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/*
+ * 32-bit wait4(2).  pid and options are sign-extended from their 32-bit
+ * register images.  When a rusage buffer is supplied, the syscall runs
+ * against a kernel-space struct rusage (under KERNEL_DS) which is then
+ * converted to the 32-bit layout with put_rusage(); the status word is
+ * staged the same way and copied out afterwards.
+ */
+asmlinkage long sys32_wait4(u32 pid, unsigned int * stat_addr, u32 options, struct rusage * ru)
+{
+	PPCDBG(PPCDBG_SYS32, "sys32_wait4 - running - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	if (!ru)
+		/* Fast path.  Note the (int) cast on options: the comment
+		 * above requires sign extension but the original code
+		 * omitted the cast here.
+		 */
+		return sys_wait4((int)pid, stat_addr, (int)options, NULL);
+	else {
+		struct rusage r;
+		int ret;
+		unsigned int status;
+		mm_segment_t old_fs = get_fs();
+
+		set_fs (KERNEL_DS);
+		ret = sys_wait4((int)pid, stat_addr ? &status : NULL, (int)options, &r);
+		set_fs (old_fs);
+		if (put_rusage ((struct rusage32 *)ru, &r)) return -EFAULT;
+		if (stat_addr && put_user (status, stat_addr))
+			return -EFAULT;
+		return ret;
+	}
+}
+
+
+extern asmlinkage long sys_waitpid(pid_t pid, unsigned int * stat_addr, int options);
+
+/* Note: it is necessary to treat pid and options as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit waitpid(2): sign-extend pid and options; stat_addr passes through
+ * since unsigned int is the same width on both ABIs.
+ */
+asmlinkage long sys32_waitpid(u32 pid, unsigned int * stat_addr, u32 options)
+{
+	int spid = (int)pid;
+	int sopts = (int)options;
+
+	return sys_waitpid(spid, stat_addr, sopts);
+}
+
+
+extern asmlinkage int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, struct pt_regs *regs);
+
+/* Note: it is necessary to treat p1, p2, p3, p4, p5, and p6 as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit fork(2): sign-extend the register arguments; the native fork
+ * takes the state it actually needs from regs.
+ */
+asmlinkage int sys32_fork(u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, struct pt_regs *regs)
+{
+	return sys_fork((int)p1, (int)p2, (int)p3,
+			(int)p4, (int)p5, (int)p6, regs);
+}
+
+
+extern asmlinkage long sys_getgroups(int gidsetsize, gid_t *grouplist);
+
+/* Note: it is necessary to treat gidsetsize as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit getgroups(2): sign-extend gidsetsize; the gid list passes through. */
+asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t *grouplist)
+{
+	int size = (int)gidsetsize;
+
+	return sys_getgroups(size, grouplist);
+}
+
+
+extern asmlinkage long sys_getpgid(pid_t pid);
+
+/* Note: it is necessary to treat pid as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit getpgid(2): sign-extend pid. */
+asmlinkage long sys32_getpgid(u32 pid)
+{
+	int spid = (int)pid;
+
+	return sys_getpgid(spid);
+}
+
+
+extern asmlinkage long sys_getpriority(int which, int who);
+
+/* Note: it is necessary to treat which and who as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit getpriority(2): sign-extend which and who. */
+asmlinkage long sys32_getpriority(u32 which, u32 who)
+{
+	int swhich = (int)which;
+	int swho = (int)who;
+
+	return sys_getpriority(swhich, swho);
+}
+
+
+extern asmlinkage long sys_getsid(pid_t pid);
+
+/* Note: it is necessary to treat pid as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit getsid(2): sign-extend pid. */
+asmlinkage long sys32_getsid(u32 pid)
+{
+	int spid = (int)pid;
+
+	return sys_getsid(spid);
+}
+
+
+extern asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
+
+/* Note: it is necessary to treat on as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit ioperm(2): from/num already arrive full-width; only the on/off
+ * flag needs sign extension.
+ */
+asmlinkage long sys32_ioperm(unsigned long from, unsigned long num, u32 on)
+{
+	int flag = (int)on;
+
+	return sys_ioperm(from, num, flag);
+}
+
+
+extern asmlinkage int sys_iopl(int a1, int a2, int a3, int a4);
+
+/* Note: it is necessary to treat a1, a2, a3, and a4 as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit iopl shim: sign-extend all four register arguments. */
+asmlinkage int sys32_iopl(u32 a1, u32 a2, u32 a3, u32 a4)
+{
+	return sys_iopl((int)a1, (int)a2,
+			(int)a3, (int)a4);
+}
+
+
+extern asmlinkage long sys_kill(int pid, int sig);
+
+/* Note: it is necessary to treat pid and sig as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit kill(2): sign-extend pid and sig (pid may legitimately be
+ * negative to target a process group).
+ */
+asmlinkage long sys32_kill(u32 pid, u32 sig)
+{
+	int spid = (int)pid;
+	int ssig = (int)sig;
+
+	return sys_kill(spid, ssig);
+}
+
+
+extern asmlinkage long sys_mkdir(const char * pathname, int mode);
+
+/* Note: it is necessary to treat mode as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit mkdir(2): sign-extend mode. */
+asmlinkage long sys32_mkdir(const char * pathname, u32 mode)
+{
+	int smode = (int)mode;
+
+	return sys_mkdir(pathname, smode);
+}
+
+
+extern asmlinkage long sys_mlockall(int flags);
+
+/* Note: it is necessary to treat flags as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit mlockall(2): sign-extend flags. */
+asmlinkage long sys32_mlockall(u32 flags)
+{
+	int sflags = (int)flags;
+
+	return sys_mlockall(sflags);
+}
+
+
+extern asmlinkage int sys_modify_ldt(int a1, int a2, int a3, int a4);
+
+/* Note: it is necessary to treat a1, a2, a3, and a4 as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit modify_ldt shim: sign-extend all four register arguments. */
+asmlinkage int sys32_modify_ldt(u32 a1, u32 a2, u32 a3, u32 a4)
+{
+	return sys_modify_ldt((int)a1, (int)a2,
+			      (int)a3, (int)a4);
+}
+
+
+extern asmlinkage long sys_msync(unsigned long start, size_t len, int flags);
+
+/* Note: it is necessary to treat flags as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit msync(2): start/len arrive full-width; sign-extend flags. */
+asmlinkage long sys32_msync(unsigned long start, size_t len, u32 flags)
+{
+	int sflags = (int)flags;
+
+	return sys_msync(start, len, sflags);
+}
+
+
+extern asmlinkage long sys_nice(int increment);
+
+/* Note: it is necessary to treat increment as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit nice(2): the increment may be negative, so sign-extend it. */
+asmlinkage long sys32_nice(u32 increment)
+{
+	int inc = (int)increment;
+
+	return sys_nice(inc);
+}
+
+
+extern asmlinkage long sys_open(const char * filename, int flags, int mode);
+
+/* Note: it is necessary to treat flags and mode as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/*
+ * 32-bit open(2): flags and mode are sign-extended from their 32-bit
+ * register images.  The parameters are declared u32 to match the note
+ * above and every sibling wrapper in this file (the original prototype
+ * took int, making the casts no-ops); the register-passing interface is
+ * unchanged.
+ */
+asmlinkage long sys32_open(const char * filename, u32 flags, u32 mode)
+{
+	return sys_open(filename, (int)flags, (int)mode);
+}
+
+
+extern asmlinkage long sys_readlink(const char * path, char * buf, int bufsiz);
+
+/* Note: it is necessary to treat bufsiz as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit readlink(2): sign-extend the buffer size. */
+asmlinkage long sys32_readlink(const char * path, char * buf, u32 bufsiz)
+{
+	int size = (int)bufsiz;
+
+	return sys_readlink(path, buf, size);
+}
+
+
+extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void * arg);
+
+/* Note: it is necessary to treat magic1 and magic2 as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit reboot(2): sign-extend the two magic numbers; cmd and arg pass
+ * through unchanged.
+ */
+asmlinkage long sys32_reboot(u32 magic1, u32 magic2, unsigned int cmd, void * arg)
+{
+	int m1 = (int)magic1;
+	int m2 = (int)magic2;
+
+	return sys_reboot(m1, m2, cmd, arg);
+}
+
+
+extern asmlinkage long sys_sched_get_priority_max(int policy);
+
+/* Note: it is necessary to treat option as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sched_get_priority_max(2): sign-extend policy. */
+asmlinkage long sys32_sched_get_priority_max(u32 policy)
+{
+	int spolicy = (int)policy;
+
+	return sys_sched_get_priority_max(spolicy);
+}
+
+
+extern asmlinkage long sys_sched_get_priority_min(int policy);
+
+/* Note: it is necessary to treat policy as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sched_get_priority_min(2): sign-extend policy. */
+asmlinkage long sys32_sched_get_priority_min(u32 policy)
+{
+	int spolicy = (int)policy;
+
+	return sys_sched_get_priority_min(spolicy);
+}
+
+
+extern asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param);
+
+/* Note: it is necessary to treat pid as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sched_getparam(2): sign-extend pid; the param struct passes
+ * through unchanged.
+ */
+asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param *param)
+{
+	int spid = (int)pid;
+
+	return sys_sched_getparam(spid, param);
+}
+
+
+extern asmlinkage long sys_sched_getscheduler(pid_t pid);
+
+/* Note: it is necessary to treat pid as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sched_getscheduler(2): sign-extend pid. */
+asmlinkage long sys32_sched_getscheduler(u32 pid)
+{
+	int spid = (int)pid;
+
+	return sys_sched_getscheduler(spid);
+}
+
+
+extern asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param);
+
+/* Note: it is necessary to treat pid as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sched_setparam(2): sign-extend pid. */
+asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param *param)
+{
+	int spid = (int)pid;
+
+	return sys_sched_setparam(spid, param);
+}
+
+
+extern asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param *param);
+
+/* Note: it is necessary to treat pid and policy as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sched_setscheduler(2): sign-extend pid and policy. */
+asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param *param)
+{
+	int spid = (int)pid;
+	int spolicy = (int)policy;
+
+	return sys_sched_setscheduler(spid, spolicy, param);
+}
+
+
+extern asmlinkage long sys_setdomainname(char *name, int len);
+
+/* Note: it is necessary to treat len as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit setdomainname(2): sign-extend len. */
+asmlinkage long sys32_setdomainname(char *name, u32 len)
+{
+	int slen = (int)len;
+
+	return sys_setdomainname(name, slen);
+}
+
+
+extern asmlinkage long sys_setgroups(int gidsetsize, gid_t *grouplist);
+
+/* Note: it is necessary to treat gidsetsize as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit setgroups(2): sign-extend gidsetsize. */
+asmlinkage long sys32_setgroups(u32 gidsetsize, gid_t *grouplist)
+{
+	int size = (int)gidsetsize;
+
+	return sys_setgroups(size, grouplist);
+}
+
+
+extern asmlinkage long sys_sethostname(char *name, int len);
+
+/* Note: it is necessary to treat len as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit sethostname(2): sign-extend len. */
+asmlinkage long sys32_sethostname(char *name, u32 len)
+{
+	int slen = (int)len;
+
+	return sys_sethostname(name, slen);
+}
+
+
+extern asmlinkage long sys_setpgid(pid_t pid, pid_t pgid);
+
+/* Note: it is necessary to treat pid and pgid as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit setpgid(2): sign-extend both pid and pgid. */
+asmlinkage long sys32_setpgid(u32 pid, u32 pgid)
+{
+	int spid = (int)pid;
+	int spgid = (int)pgid;
+
+	return sys_setpgid(spid, spgid);
+}
+
+
+extern asmlinkage long sys_setpriority(int which, int who, int niceval);
+
+/* Note: it is necessary to treat which, who, and niceval as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit setpriority(2): sign-extend which, who, and niceval (niceval is
+ * frequently negative).
+ */
+asmlinkage long sys32_setpriority(u32 which, u32 who, u32 niceval)
+{
+	int swhich = (int)which;
+	int swho = (int)who;
+	int snice = (int)niceval;
+
+	return sys_setpriority(swhich, swho, snice);
+}
+
+
+extern asmlinkage long sys_ssetmask(int newmask);
+
+/* Note: it is necessary to treat newmask as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit ssetmask(2): sign-extend the new signal mask. */
+asmlinkage long sys32_ssetmask(u32 newmask)
+{
+	int smask = (int)newmask;
+
+	return sys_ssetmask(smask);
+}
+
+
+extern asmlinkage long sys_swapon(const char * specialfile, int swap_flags);
+
+/* Note: it is necessary to treat swap_flags as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit swapon(2): sign-extend swap_flags. */
+asmlinkage long sys32_swapon(const char * specialfile, u32 swap_flags)
+{
+	int sflags = (int)swap_flags;
+
+	return sys_swapon(specialfile, sflags);
+}
+
+
+extern asmlinkage long sys_syslog(int type, char * buf, int len);
+
+/* Note: it is necessary to treat type and len as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit syslog(2): sign-extend type and len. */
+asmlinkage long sys32_syslog(u32 type, char * buf, u32 len)
+{
+	int stype = (int)type;
+	int slen = (int)len;
+
+	return sys_syslog(stype, buf, slen);
+}
+
+
+extern asmlinkage long sys_umask(int mask);
+
+/* Note: it is necessary to treat mask as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit umask(2): sign-extend the mask. */
+asmlinkage long sys32_umask(u32 mask)
+{
+	int smask = (int)mask;
+
+	return sys_umask(smask);
+}
+
+
+extern asmlinkage long sys_umount(char * name, int flags);
+
+/* Note: it is necessary to treat flags as an unsigned int,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit umount(2): sign-extend flags. */
+asmlinkage long sys32_umount(char * name, u32 flags)
+{
+	int sflags = (int)flags;
+
+	return sys_umount(name, sflags);
+}
+
+
+extern asmlinkage int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, struct pt_regs *regs);
+
+/* Note: it is necessary to treat p1, p2, p3, p4, p5, and p6 as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit vfork(2): sign-extend the register arguments; the native vfork
+ * takes what it needs from regs.
+ */
+asmlinkage int sys32_vfork(u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, struct pt_regs *regs)
+{
+	return sys_vfork((int)p1, (int)p2, (int)p3,
+			 (int)p4, (int)p5, (int)p6, regs);
+}
+
+
+extern asmlinkage int sys_vm86(int a1, int a2, int a3, int a4);
+
+/* Note: it is necessary to treat a1, a2, a3, and a4 as unsigned ints,
+ * with the corresponding cast to a signed int to insure that the
+ * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
+ * and the register representation of a signed int (msr in 64-bit mode) is performed.
+ */
+/* 32-bit vm86 shim: sign-extend all four register arguments. */
+asmlinkage int sys32_vm86(u32 a1, u32 a2, u32 a3, u32 a4)
+{
+	return sys_vm86((int)a1, (int)a2,
+			(int)a3, (int)a4);
+}
+
+
+
+
+
+extern asmlinkage ssize_t sys_pread(unsigned int fd, char * buf,
+ size_t count, loff_t pos);
+
+extern asmlinkage ssize_t sys_pwrite(unsigned int fd, const char * buf,
+ size_t count, loff_t pos);
+
+typedef __kernel_ssize_t32 ssize_t32;
+
+/* 32-bit pread: the 64-bit file offset is delivered as a register pair
+ * (poshi:poslo); reg6 is an unused slot (presumably pad to align the
+ * 64-bit pair -- confirm against the 32-bit syscall ABI).
+ */
+asmlinkage ssize_t32 sys32_pread(unsigned int fd, char *ubuf,
+				 __kernel_size_t32 count, u32 reg6, u32 poshi, u32 poslo)
+{
+	loff_t pos = ((loff_t)AA(poshi) << 32) | AA(poslo);
+
+	return sys_pread(fd, ubuf, count, pos);
+}
+
+/* 32-bit pwrite: mirror image of sys32_pread -- reassemble the 64-bit
+ * offset from the poshi:poslo register pair.
+ */
+asmlinkage ssize_t32 sys32_pwrite(unsigned int fd, char *ubuf,
+				  __kernel_size_t32 count, u32 reg6 ,u32 poshi, u32 poslo)
+{
+	loff_t pos = ((loff_t)AA(poshi) << 32) | AA(poslo);
+
+	return sys_pwrite(fd, ubuf, count, pos);
+}
+
+
+extern asmlinkage long sys_truncate(const char * path, unsigned long length);
+extern asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+
+/* 32-bit truncate64: the length arrives as a high/low register pair.
+ * A set sign bit in the high word means a negative length: reject it.
+ */
+asmlinkage int sys32_truncate64(const char * path, u32 reg4, unsigned long high, unsigned long low)
+{
+	unsigned long length = (high << 32) | low;
+
+	if ((int)high < 0)
+		return -EINVAL;
+	return sys_truncate(path, length);
+}
+
+/* 32-bit ftruncate64: same high/low reassembly and negative-length check
+ * as sys32_truncate64, operating on an open descriptor.
+ */
+asmlinkage int sys32_ftruncate64(unsigned int fd, u32 reg4, unsigned long high, unsigned long low)
+{
+	unsigned long length = (high << 32) | low;
+
+	if ((int)high < 0)
+		return -EINVAL;
+	return sys_ftruncate(fd, length);
+}
+
+
+
+/* 32-bit fcntl64: the *LK64 lock commands are remapped onto the native
+ * lock commands; everything else goes through the generic 32-bit fcntl.
+ */
+asmlinkage long sys32_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	if (cmd < F_GETLK64 || cmd > F_SETLKW64)
+		return sys32_fcntl(fd, cmd, arg);
+	return sys_fcntl(fd, cmd - F_GETLK64 + F_GETLK, arg);
+}
+
+
+
+
+/* 32-bit image of the _sysctl argument block: every pointer member is a
+ * 32-bit user address that must be widened with A() before use.
+ */
+struct __sysctl_args32 {
+	u32 name;		/* user ptr: int vector naming the entry */
+	int nlen;		/* number of ints in name */
+	u32 oldval;		/* user ptr: buffer for the old value */
+	u32 oldlenp;		/* user ptr: in/out length of oldval */
+	u32 newval;		/* user ptr: new value (or 0) */
+	u32 newlen;		/* length of newval */
+	u32 __unused[4];	/* borrowed by sys32_sysctl as scratch */
+};
+
+/*
+ * 32-bit _sysctl(2).  Each 32-bit pointer member of the argument block is
+ * widened with A() before calling do_sysctl().  Because size_t differs in
+ * width between the ABIs, the oldlen value is staged in an 8-byte-aligned
+ * slot inside the *user-space* __unused area of the argument structure.
+ */
+extern asmlinkage long sys32_sysctl(struct __sysctl_args32 *args)
+{
+	struct __sysctl_args32 tmp;
+	int error;
+	size_t oldlen, *oldlenp = NULL;
+	/* 8-byte-aligned scratch address within the user's __unused[]. */
+	unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7;
+
+	if (copy_from_user(&tmp, args, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.oldval && tmp.oldlenp) {
+		/* Duh, this is ugly and might not work if sysctl_args
+		   is in read-only memory, but do_sysctl does indirectly
+		   a lot of uaccess in both directions and we'd have to
+		   basically copy the whole sysctl.c here, and
+		   glibc's __sysctl uses rw memory for the structure
+		   anyway. */
+		if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) ||
+		    put_user(oldlen, (size_t *)addr))
+			return -EFAULT;
+		oldlenp = (size_t *)addr;
+	}
+
+	lock_kernel();
+	error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval),
+			  oldlenp, (void *)A(tmp.newval), tmp.newlen);
+	unlock_kernel();
+	if (oldlenp) {
+		if (!error) {
+			/* Propagate the possibly-updated length back into
+			 * the user's 32-bit oldlenp.
+			 */
+			if (get_user(oldlen, (size_t *)addr) ||
+			    put_user(oldlen, (u32 *)A(tmp.oldlenp)))
+				error = -EFAULT;
+		}
+		/* Best-effort restore of the borrowed __unused area.
+		 * NOTE(review): the copy_to_user return value is discarded.
+		 */
+		copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+	}
+	return error;
+}
+
+/*
+ * 32-bit time(2): derive the seconds count from gettimeofday and, when a
+ * location is supplied, store it through the user pointer.  Returns the
+ * seconds value, or -EFAULT if the store faults.
+ */
+asmlinkage long sys32_time(__kernel_time_t32* tloc)
+{
+	struct timeval tv;
+	__kernel_time_t32 secs;
+
+	do_gettimeofday( &tv );
+	secs = tv.tv_sec;
+
+	if (tloc && put_user(secs, tloc))
+		secs = -EFAULT;
+
+	return secs;
+}
--- /dev/null
+/*
+ * linux/arch/ppc/kernel/sys_ppc.c
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/i386/kernel/sys_i386.c"
+ * Adapted from the i386 version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras (paulus@cs.anu.edu.au).
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/PPC
+ * platform.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/sys.h>
+#include <linux/ipc.h>
+#include <linux/utsname.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+#include <asm/semaphore.h>
+#include <asm/ppcdebug.h>
+
+#include <asm/time.h>
+
+extern unsigned long wall_jiffies;
+#define USEC_PER_SEC (1000000)
+
+/* No boot-time CPU bug checks are performed on this architecture. */
+void
+check_bugs(void)
+{
+}
+
+/* ioperm(2) is an x86-style port-permission call; unsupported here, so log
+ * the attempt and fail with -EIO.
+ */
+asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
+{
+	printk(KERN_ERR "sys_ioperm()\n");
+	return -EIO;
+}
+
+/* iopl(2) stub: not implemented on this architecture; log and return
+ * -ENOSYS.
+ */
+int sys_iopl(int a1, int a2, int a3, int a4)
+{
+	printk(KERN_ERR "sys_iopl(%x, %x, %x, %x)!\n", a1, a2, a3, a4);
+	return (-ENOSYS);
+}
+
+/* vm86(2) stub: x86-only facility; log and return -ENOSYS. */
+int sys_vm86(int a1, int a2, int a3, int a4)
+{
+	printk(KERN_ERR "sys_vm86(%x, %x, %x, %x)!\n", a1, a2, a3, a4);
+	return (-ENOSYS);
+}
+
+/* modify_ldt(2) stub: x86-only facility; log and return -ENOSYS. */
+int sys_modify_ldt(int a1, int a2, int a3, int a4)
+{
+	printk(KERN_ERR "sys_modify_ldt(%x, %x, %x, %x)!\n", a1, a2, a3, a4);
+	return (-ENOSYS);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int
+sys_ipc (uint call, int first, int second, long third, void *ptr, long fifth)
+{
+	int version, ret;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_ipc - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	version = call >> 16; /* hack for backward compatibility */
+	call &= 0xffff;
+
+	/* Unrecognized call numbers fall through with -EINVAL. */
+	ret = -EINVAL;
+	switch (call) {
+	case SEMOP:
+		ret = sys_semop (first, (struct sembuf *)ptr, second);
+		break;
+	case SEMGET:
+		ret = sys_semget (first, second, third);
+		break;
+	case SEMCTL: {
+		union semun fourth;
+
+		/* The semun argument is passed indirectly through ptr. */
+		if (!ptr)
+			break;
+		if ((ret = verify_area (VERIFY_READ, ptr, sizeof(long)))
+		    || (ret = get_user(fourth.__pad, (void **)ptr)))
+			break;
+		ret = sys_semctl (first, second, third, fourth);
+		break;
+	}
+	case MSGSND:
+		ret = sys_msgsnd (first, (struct msgbuf *) ptr, second, third);
+		break;
+	case MSGRCV:
+		switch (version) {
+		case 0: {
+			/* Old-style callers bundle msgp and msgtyp inside an
+			 * ipc_kludge that ptr points at.
+			 */
+			struct ipc_kludge tmp;
+
+			if (!ptr)
+				break;
+			if ((ret = verify_area (VERIFY_READ, ptr, sizeof(tmp)))
+			    || (ret = copy_from_user(&tmp,
+						(struct ipc_kludge *) ptr,
+						sizeof (tmp))))
+				break;
+			ret = sys_msgrcv (first, (struct msgbuf *)(unsigned long)tmp.msgp,
+					  second, tmp.msgtyp, third);
+			break;
+		}
+		default:
+			ret = sys_msgrcv (first, (struct msgbuf *) ptr,
+					  second, fifth, third);
+			break;
+		}
+		break;
+	case MSGGET:
+		ret = sys_msgget ((key_t) first, second);
+		break;
+	case MSGCTL:
+		ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
+		break;
+	case SHMAT:
+		switch (version) {
+		default: {
+			ulong raddr;
+
+			/* The attach address is returned through *third. */
+			if ((ret = verify_area(VERIFY_WRITE, (ulong*) third,
+					       sizeof(ulong))))
+				break;
+			ret = sys_shmat (first, (char *) ptr, second, &raddr);
+			if (ret)
+				break;
+			ret = put_user (raddr, (ulong *) third);
+			break;
+		}
+		case 1: /* iBCS2 emulator entry point */
+			if (!segment_eq(get_fs(), get_ds()))
+				break;
+			ret = sys_shmat (first, (char *) ptr, second,
+					 (ulong *) third);
+			break;
+		}
+		break;
+	case SHMDT:
+		ret = sys_shmdt ((char *)ptr);
+		break;
+	case SHMGET:
+		ret = sys_shmget (first, second, third);
+		break;
+	case SHMCTL:
+		ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
+		break;
+	}
+
+	PPCDBG(PPCDBG_SYS64X, "sys_ipc - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	return ret;
+}
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(int *fildes)
+{
+	/* Create a pipe and copy its two descriptors out to user space. */
+	int fds[2];
+	int rc;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_pipe - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	rc = do_pipe(fds);
+	if (rc == 0 && copy_to_user(fildes, fds, 2 * sizeof(int)))
+		rc = -EFAULT;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_pipe - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	return rc;
+}
+
+/*
+ * mmap(2) entry point.  Looks up the file for non-anonymous mappings,
+ * strips the caller-invisible MAP_EXECUTABLE/MAP_DENYWRITE bits and
+ * calls do_mmap() under the mm semaphore.  Returns the mapped address
+ * or a negative errno.
+ */
+asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
+				  unsigned long prot, unsigned long flags,
+				  unsigned long fd, off_t offset)
+{
+	struct file * file = NULL;
+	unsigned long ret = -EBADF;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_mmap - entered - addr=%lx, len=%lx - pid=%ld, comm=%s \n", addr, len, current->pid, current->comm);
+
+	/* Anonymous mappings have no backing file; otherwise the fd must
+	 * resolve to an open file (else -EBADF). */
+	if (!(flags & MAP_ANONYMOUS)) {
+		if (!(file = fget(fd)))
+			goto out;
+	}
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	/* Fix: "&current" had been mangled to the "&curren;" HTML entity
+	 * ("¤t"), which does not compile.  do_mmap() needs mmap_sem
+	 * held for writing. */
+	down_write(&current->mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flags, offset);
+	up_write(&current->mm->mmap_sem);
+	if (file)
+		fput(file);
+
+out:
+
+	/* Fix: ret is unsigned long, so log with %lx rather than %x. */
+	PPCDBG(PPCDBG_SYS64X, "sys_mmap - exited - ret=%lx \n", ret);
+
+	return ret;
+}
+
+/*
+ * pause(2): block the caller until a signal is delivered; always returns
+ * -ERESTARTNOHAND so the syscall is not restarted after the handler runs.
+ */
+asmlinkage int sys_pause(void)
+{
+
+	PPCDBG(PPCDBG_SYS64X, "sys_pause - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	/* Mark ourselves interruptible and yield; schedule() returns once
+	 * something (a signal) wakes us. */
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+
+	PPCDBG(PPCDBG_SYS64X, "sys_pause - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	return -ERESTARTNOHAND;
+}
+
+/*
+ * "fakeppc" boot option: give init (and everything it spawns) the 32-bit
+ * PPC personality.  Only the bare flag is accepted.
+ */
+static int __init set_fakeppc(char *str)
+{
+	/* Reject "fakeppc=<value>" forms. */
+	if (*str)
+		return 0;
+	init_task.personality = PER_LINUX32;
+	return 1;
+}
+__setup("fakeppc", set_fakeppc);
+
+asmlinkage int sys_uname(struct old_utsname * name)
+{
+	/* uname(2): copy system_utsname to user space under uts_sem. */
+	int rc = -EFAULT;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_uname - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	down_read(&uts_sem);
+	if (name != NULL && copy_to_user(name, &system_utsname, sizeof (*name)) == 0)
+		rc = 0;
+	up_read(&uts_sem);
+
+	PPCDBG(PPCDBG_SYS64X, "sys_uname - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	return rc;
+}
+
+/*
+ * olduname(2): copy the utsname fields, truncated to __OLD_UTS_LEN and
+ * explicitly NUL-terminated, into the user's oldold_utsname.  Fault
+ * results are accumulated in `error' and collapsed to -EFAULT at the end.
+ */
+asmlinkage int sys_olduname(struct oldold_utsname * name)
+{
+	int error;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_olduname - entered - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+
+	if (!name)
+		return -EFAULT;
+	/* One access_ok() covers the whole struct; the __ variants below
+	 * skip the per-call check. */
+	if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+		return -EFAULT;
+
+	down_read(&uts_sem);
+	error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
+	error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
+	error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
+	error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
+	error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
+	error -= __put_user(0,name->release+__OLD_UTS_LEN);
+	error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
+	error -= __put_user(0,name->version+__OLD_UTS_LEN);
+	error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
+	/* Bug fix: the last step used "error =", throwing away any fault
+	 * recorded by the nine preceding copies. */
+	error -= __put_user(0,name->machine+__OLD_UTS_LEN);
+	up_read(&uts_sem);
+
+	error = error ? -EFAULT : 0;
+
+	PPCDBG(PPCDBG_SYS64X, "sys_olduname - exited - pid=%ld current=%lx comm=%s \n", current->pid, current, current->comm);
+	return error;
+}
+
+/*
+ * time(2): return seconds since the epoch, optionally storing the value
+ * at *tloc.  The current second is xtime plus the timebase ticks elapsed
+ * since the last tick (tb_last_stamp) and any not-yet-applied jiffies.
+ */
+asmlinkage time_t sys64_time(time_t* tloc)
+{
+	time_t secs;
+	time_t usecs;
+
+	long tb_delta = tb_ticks_since(tb_last_stamp);
+	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
+
+	secs = xtime.tv_sec;
+	usecs = xtime.tv_usec + tb_delta / tb_ticks_per_usec;
+	/* Carry accumulated microseconds over into whole seconds. */
+	while (usecs >= USEC_PER_SEC) {
+		++secs;
+		usecs -= USEC_PER_SEC;
+	}
+
+	if (tloc) {
+		/* A faulting user pointer turns the result into -EFAULT. */
+		if (put_user(secs,tloc))
+			secs = -EFAULT;
+	}
+
+	return secs;
+}
--- /dev/null
+/*
+ *
+ * Common time routines among all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
+ * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
+ *
+ * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
+ * to make clock more stable (2.4.0-test5). The only thing
+ * that this code assumes is that the timebases have been synchronized
+ * by firmware on SMP and are never stopped (never do sleep
+ * on SMP then, nap and doze are OK).
+ *
+ * Speeded up do_gettimeofday by getting rid of references to
+ * xtime (which required locks for consistency). (mikejc@us.ibm.com)
+ *
+ * TODO (not necessarily in this file):
+ * - improve precision and reproducibility of timebase frequency
+ * measurement at boot time. (for iSeries, we calibrate the timebase
+ * against the Titan chip's clock.)
+ * - for astronomical applications: add a new function to get
+ * non ambiguous timestamps even around leap seconds. This needs
+ * a new timestamp format and a good name.
+ *
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/time.h>
+#include <linux/init.h>
+
+#include <asm/segment.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/nvram.h>
+#include <asm/cache.h>
+#include <asm/machdep.h>
+#ifdef CONFIG_PPC_ISERIES
+#include <asm/iSeries/HvCallXm.h>
+#endif
+#include <asm/uaccess.h>
+
+#include <asm/time.h>
+#include <asm/ppcdebug.h>
+
+void smp_local_timer_interrupt(struct pt_regs *);
+
+/* keep track of when we need to update the rtc */
+time_t last_rtc_update;
+extern rwlock_t xtime_lock;
+extern int piranha_simulator;
+#ifdef CONFIG_PPC_ISERIES
+unsigned long iSeries_recal_titan = 0;
+unsigned long iSeries_recal_tb = 0;
+static unsigned long first_settimeofday = 1;
+#endif
+
+#define XSEC_PER_SEC (1024*1024)
+#define USEC_PER_SEC (1000000)
+
+unsigned long tb_ticks_per_jiffy;
+unsigned long tb_ticks_per_usec;
+unsigned long tb_ticks_per_sec;
+unsigned long tb_to_xs;
+unsigned tb_to_us;
+spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+
+struct gettimeofday_struct do_gtod;
+
+extern unsigned long wall_jiffies;
+extern unsigned long lpEvent_count;
+extern int smp_tb_synchronized;
+
+extern unsigned long prof_cpu_mask;
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+extern char _stext;
+
+/*
+ * Record one kernel-profiling hit for instruction address nip in the
+ * prof_buffer histogram (/proc/profile), honouring prof_cpu_mask.
+ */
+static inline void ppc_do_profile (unsigned long nip)
+{
+	if (!prof_buffer)
+		return;
+
+	/*
+	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
+	 * (default is all CPUs.)
+	 */
+	if (!((1<<smp_processor_id()) & prof_cpu_mask))
+		return;
+
+	/* Convert the address to a histogram bucket relative to _stext. */
+	nip -= (unsigned long) &_stext;
+	nip >>= prof_shift;
+	/*
+	 * Don't ignore out-of-bounds EIP values silently,
+	 * put them into the last histogram slot, so if
+	 * present, they will show up as a sharp peak.
+	 */
+	if (nip > prof_len-1)
+		nip = prof_len-1;
+	atomic_inc((atomic_t *)&prof_buffer[nip]);
+}
+
+
+/* Called from timer_interrupt() with xtime_lock held: writes the software
+ * clock back to the RTC roughly every 11 minutes (659 s) while NTP has
+ * the clock synchronized. */
+static __inline__ void timer_check_rtc(void)
+{
+	/*
+	 * update the rtc when needed, this should be performed on the
+	 * right fraction of a second. Half or full second ?
+	 * Full second works on mk48t59 clocks, others need testing.
+	 * Note that this update is basically only used through
+	 * the adjtimex system calls. Setting the HW clock in
+	 * any other way is a /dev/rtc and userland business.
+	 * This is still wrong by -0.5/+1.5 jiffies because of the
+	 * timer interrupt resolution and possible delay, but here we
+	 * hit a quantization limit which can only be solved by higher
+	 * resolution timers and decoupling time management from timer
+	 * interrupts. This is also wrong on the clocks
+	 * which require being written at the half second boundary.
+	 * We should have an rtc call that only sets the minutes and
+	 * seconds like on Intel to avoid problems with non UTC clocks.
+	 */
+	if ( (time_status & STA_UNSYNC) == 0 &&
+	     xtime.tv_sec - last_rtc_update >= 659 &&
+	     abs(xtime.tv_usec - (1000000-1000000/HZ)) < 500000/HZ &&
+	     jiffies - wall_jiffies == 1) {
+	    struct rtc_time tm;
+	    /* to_tm() yields calendar year and 1-based month; the RTC
+	     * expects years-since-1900 and a 0-based month. */
+	    to_tm(xtime.tv_sec+1, &tm);
+	    tm.tm_year -= 1900;
+	    tm.tm_mon -= 1;
+	    if (ppc_md.set_rtc_time(&tm) == 0)
+		last_rtc_update = xtime.tv_sec+1;
+	    else
+		/* Try again one minute later */
+		last_rtc_update += 60;
+	}
+}
+
+#ifdef CONFIG_PPC_ISERIES
+
+/*
+ * This function recalibrates the timebase based on the 49-bit time-of-day value in the Titan chip.
+ * The Titan is much more accurate than the value returned by the service processor for the
+ * timebase frequency.
+ */
+
+static void iSeries_tb_recal(void)
+{
+	struct div_result divres;
+	unsigned long titan, tb;
+	tb = get_tb();
+	titan = HvCallXm_loadTod();
+	/* On the first call there is no previous sample; just record one. */
+	if ( iSeries_recal_titan ) {
+		unsigned long tb_ticks = tb - iSeries_recal_tb;
+		/* >> 12 converts the Titan delta to microseconds (2^12
+		 * Titan units per usec). */
+		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
+		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
+		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
+		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
+		char sign = '+';
+		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
+		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
+
+		if ( tick_diff < 0 ) {
+			tick_diff = -tick_diff;
+			sign = '-';
+		}
+		if ( tick_diff ) {
+			/* Accept the new rate only if within 4%
+			 * (tb_ticks_per_jiffy/25) of the old one. */
+			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
+				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
+						new_tb_ticks_per_jiffy, sign, tick_diff );
+				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
+				tb_ticks_per_sec   = new_tb_ticks_per_sec;
+				/* Recompute the tb -> xsec (1/2^20 s) factor
+				 * used by do_gettimeofday(). */
+				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
+				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
+				tb_to_xs = divres.result_low;
+				do_gtod.varp->tb_to_xs = tb_to_xs;
+			}
+			else {
+				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
+					"                   new tb_ticks_per_jiffy = %lu\n"
+					"                   old tb_ticks_per_jiffy = %lu\n",
+					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
+			}
+		}
+	}
+	iSeries_recal_titan = titan;
+	iSeries_recal_tb = tb;
+}
+#endif
+
+/*
+ * For iSeries shared processors, we have to let the hypervisor
+ * set the hardware decrementer. We set a virtual decrementer
+ * in the ItLpPaca and call the hypervisor if the virtual
+ * decrementer is less than the current value in the hardware
+ * decrementer. (almost always the new decrementer value will
+ * be greater than the current hardware decementer so the hypervisor
+ * call will not be needed)
+ */
+
+unsigned long tb_last_stamp=0;
+
+/*
+ * timer_interrupt - gets called when the decrementer overflows,
+ * with interrupts disabled.
+ */
+int timer_interrupt(struct pt_regs * regs)
+{
+	int next_dec;
+	unsigned long cur_tb;
+	/* The per-cpu Paca lives in SPRG3. */
+	struct Paca * paca = (struct Paca *)mfspr(SPRG3);
+	unsigned long cpu = paca->xPacaIndex;
+	struct ItLpQueue * lpq;
+
+	irq_enter(cpu);
+
+#ifndef CONFIG_PPC_ISERIES
+	if (!user_mode(regs))
+		ppc_do_profile(instruction_pointer(regs));
+#endif
+
+	/* Acknowledge the (virtual) decrementer interrupt in the LpPaca. */
+	paca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
+
+	/* Account every jiffy boundary the timebase has crossed since the
+	 * last interrupt; the loop handles late/coalesced ticks. */
+	while (paca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
+
+#ifdef CONFIG_SMP
+		smp_local_timer_interrupt(regs);
+#endif
+		/* Only cpu 0 advances the global jiffies/xtime state. */
+		if (cpu == 0) {
+			write_lock(&xtime_lock);
+			tb_last_stamp = paca->next_jiffy_update_tb;
+			do_timer(regs);
+			timer_check_rtc();
+			write_unlock(&xtime_lock);
+		}
+		paca->next_jiffy_update_tb += tb_ticks_per_jiffy;
+	}
+
+	/* Re-arm the decrementer for the next jiffy boundary, capped at
+	 * the default reload value. */
+	next_dec = paca->next_jiffy_update_tb - cur_tb;
+	if (next_dec > paca->default_decr)
+        	next_dec = paca->default_decr;
+	set_dec(next_dec);
+
+	/* Drain any pending hypervisor lp events. */
+	lpq = paca->lpQueuePtr;
+	if (lpq && ItLpQueue_isLpIntPending(lpq))
+		lpEvent_count += ItLpQueue_process(lpq, regs);
+
+	irq_exit(cpu);
+
+	if (softirq_pending(cpu))
+		do_softirq();
+
+	return 1;
+}
+
+
+/*
+ * This version of gettimeofday has microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+	unsigned long sec, usec, tb_ticks;
+	unsigned long xsec, tb_xsec;
+	struct gettimeofday_vars * temp_varp;
+	unsigned long temp_tb_to_xs, temp_stamp_xsec;
+
+	/* These calculations are faster (gets rid of divides)
+	 * if done in units of 1/2^20 rather than microseconds.
+	 * The conversion to microseconds at the end is done
+	 * without a divide (and in fact, without a multiply) */
+	tb_ticks = get_tb() - do_gtod.tb_orig_stamp;
+	/* Snapshot the double-buffered conversion pair via do_gtod.varp;
+	 * ppc_adjtimex() publishes a new pair with an mb() before flipping
+	 * varp, so both values here are mutually consistent. */
+	temp_varp = do_gtod.varp;
+	temp_tb_to_xs = temp_varp->tb_to_xs;
+	temp_stamp_xsec = temp_varp->stamp_xsec;
+	tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs );
+	xsec = temp_stamp_xsec + tb_xsec;
+	sec = xsec / XSEC_PER_SEC;
+	xsec -= sec * XSEC_PER_SEC;
+	usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC;
+
+	tv->tv_sec = sec;
+	tv->tv_usec = usec;
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+	unsigned long flags;
+	unsigned long delta_xsec;
+	long int tb_delta, new_usec, new_sec;
+	unsigned long new_xsec;
+
+	write_lock_irqsave(&xtime_lock, flags);
+	/* Updating the RTC is not the job of this code. If the time is
+	 * stepped under NTP, the RTC will be update after STA_UNSYNC
+	 * is cleared. Tool like clock/hwclock either copy the RTC
+	 * to the system time, in which case there is no point in writing
+	 * to the RTC again, or write to the RTC but then they don't call
+	 * settimeofday to perform this operation.
+	 */
+#ifdef CONFIG_PPC_ISERIES
+	/* The first settimeofday (from boot scripts) is used to trigger
+	 * the one-off Titan timebase recalibration. */
+	if ( first_settimeofday ) {
+		iSeries_tb_recal();
+		first_settimeofday = 0;
+	}
+#endif
+	/* Back-date the supplied time to the last jiffy boundary, since
+	 * xtime is only advanced at ticks. */
+	tb_delta = tb_ticks_since(tb_last_stamp);
+	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
+
+	new_sec = tv->tv_sec;
+	new_usec = tv->tv_usec - tb_delta / tb_ticks_per_usec;
+	while (new_usec <0) {
+		new_sec--;
+		new_usec += USEC_PER_SEC;
+	}
+	xtime.tv_usec = new_usec;
+	xtime.tv_sec = new_sec;
+
+	/* In case of a large backwards jump in time with NTP, we want the
+	 * clock to be updated as soon as the PLL is again in lock.
+	 */
+	last_rtc_update = new_sec - 658;
+
+	time_adjust = 0;                /* stop active adjtime() */
+	time_status |= STA_UNSYNC;
+	time_maxerror = NTP_PHASE_LIMIT;
+	time_esterror = NTP_PHASE_LIMIT;
+
+	/* Rebase the do_gettimeofday() variables so that tb_last_stamp
+	 * now corresponds to the new wall time. */
+	delta_xsec = mulhdu( (tb_last_stamp-do_gtod.tb_orig_stamp), do_gtod.varp->tb_to_xs );
+	new_xsec = (new_usec * XSEC_PER_SEC) / USEC_PER_SEC;
+	new_xsec += new_sec * XSEC_PER_SEC;
+	if ( new_xsec > delta_xsec ) {
+		do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
+	}
+	else {
+		/* This is only for the case where the user is setting the time
+		 * way back to a time such that the boot time would have been
+		 * before 1970 ... eg. we booted ten days ago, and we are setting
+		 * the time to Jan 5, 1970 */
+		do_gtod.varp->stamp_xsec = new_xsec;
+		do_gtod.tb_orig_stamp = tb_last_stamp;
+	}
+
+	write_unlock_irqrestore(&xtime_lock, flags);
+}
+
+/*
+ * This function is a copy of the architecture independent function
+ * but which calls do_settimeofday rather than setting the xtime
+ * fields itself. This way, the fields which are used for
+ * do_settimeofday get updated too.
+ */
+
+/*
+ * stime(2): set the system time to *tptr seconds (usec forced to zero).
+ * Requires CAP_SYS_TIME; the new value is applied via do_settimeofday()
+ * so the gettimeofday variables stay in sync (see comment above).
+ */
+long ppc64_sys_stime(int * tptr)
+{
+	int value;
+	struct timeval myTimeval;
+
+	if (!capable(CAP_SYS_TIME))
+		return -EPERM;
+
+	/* Fetch the user value with get_user() before logging it: the old
+	 * entry trace dereferenced the raw user pointer (*tptr) directly,
+	 * which is unsafe in kernel context. */
+	if (get_user(value, tptr))
+		return -EFAULT;
+
+	PPCDBG(PPCDBG_SYS32, "ppc64_sys_stime - entered - tptr=%p, *tptr=0x%x \n", tptr, value);
+
+	myTimeval.tv_sec = value;
+	myTimeval.tv_usec = 0;
+
+	do_settimeofday(&myTimeval);
+
+	PPCDBG(PPCDBG_SYS32, "ppc64_sys_stime - exiting w/ 0 \n");
+	return 0;
+}
+
+void __init time_init(void)
+{
+	/* This function is only called on the boot processor */
+	unsigned long flags;
+	struct rtc_time tm;
+
+	ppc_md.calibrate_decr();
+
+	if ( ! piranha_simulator ) {
+		ppc_md.get_boot_time(&tm);
+	}
+	/* NOTE(review): when piranha_simulator is set, tm is used below
+	 * without having been initialized — confirm this is intended. */
+	write_lock_irqsave(&xtime_lock, flags);
+	xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			      tm.tm_hour, tm.tm_min, tm.tm_sec);
+	/* Seed the do_gettimeofday() state from the current timebase. */
+	tb_last_stamp = get_tb();
+	do_gtod.tb_orig_stamp = tb_last_stamp;
+	do_gtod.varp = &do_gtod.vars[0];
+	do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
+	do_gtod.varp->tb_to_xs = tb_to_xs;
+	do_gtod.tb_to_us = tb_to_us;
+
+	xtime.tv_usec = 0;
+	last_rtc_update = xtime.tv_sec;
+	write_unlock_irqrestore(&xtime_lock, flags);
+
+#ifdef CONFIG_PPC_ISERIES
+	/* HACK HACK This allows the iSeries profiling to use /proc/profile */
+	prof_shift = 0;
+#endif
+
+	/* Not exact, but the timer interrupt takes care of this */
+	set_dec(tb_ticks_per_jiffy);
+}
+
+/*
+ * After adjtimex is called, adjust the conversion of tb ticks
+ * to microseconds to keep do_gettimeofday synchronized
+ * with ntpd.
+
+ * Use the time_freq and time_offset computed by adjtimex to
+ * adjust the frequency.
+*/
+
+void ppc_adjtimex(void)
+{
+	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec;
+	unsigned long tb_ticks_per_sec_delta;
+	long delta_freq, ltemp;
+	struct div_result divres;
+	struct timeval my_tv;
+	unsigned long flags;
+	struct gettimeofday_vars * temp_varp;
+
+	/* Turn the NTP phase offset (time_offset) into a frequency-style
+	 * correction term, scaled down by the PLL time constant. */
+	if ( time_offset < 0 ) {
+		ltemp = -time_offset;
+		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
+		ltemp >>= SHIFT_KG + time_constant;
+		ltemp = -ltemp;
+	}
+	else {
+		ltemp = time_offset;
+		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
+		ltemp >>= SHIFT_KG + time_constant;
+	}
+	delta_freq = time_freq + ltemp;
+
+	/* Adjust the nominal tick rate by the (signed) combined frequency
+	 * error; a positive error means our clock runs fast, so fewer
+	 * effective ticks per second. */
+	den = 1000000 * (1 << (SHIFT_USEC - 8));
+	if ( delta_freq < 0 ) {
+		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
+		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
+	}
+	else {
+		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
+		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
+	}
+	/* Derive the new tb -> xsec factor and the stamp that keeps the
+	 * current time continuous under the new factor. */
+	tb_ticks = get_tb() - do_gtod.tb_orig_stamp;
+	div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres );
+	new_tb_to_xs = divres.result_low;
+	new_xsec = mulhdu( tb_ticks, new_tb_to_xs );
+
+	write_lock_irqsave( &xtime_lock, flags );
+	old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs );
+	new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
+
+	/* Publish via the inactive half of the double buffer; the mb()
+	 * orders the stores before varp is flipped so do_gettimeofday()
+	 * always reads a consistent (tb_to_xs, stamp_xsec) pair. */
+	if (do_gtod.varp == &do_gtod.vars[0])
+		temp_varp = &do_gtod.vars[1];
+	else
+		temp_varp = &do_gtod.vars[0];
+	temp_varp->tb_to_xs = new_tb_to_xs;
+	temp_varp->stamp_xsec = new_stamp_xsec;
+	mb();
+	do_gtod.varp = temp_varp;
+
+	/* Keep xtime.tv_usec consistent with the adjusted conversion. */
+	do_gettimeofday( &my_tv );
+	if ( xtime.tv_sec == my_tv.tv_sec )
+		xtime.tv_usec = my_tv.tv_usec;
+
+	write_unlock_irqrestore( &xtime_lock, flags );
+
+}
+
+
+#define TICK_SIZE tick
+#define FEBRUARY	2
+#define	STARTOFTIME	1970
+#define SECDAY		86400L
+#define SECYR		(SECDAY * 365)
+/* Julian-rule leap test: adequate for the 1970-2099 range that a
+ * 32-bit seconds count can reach (1900/2100 never occur here). */
+#define	leapyear(year)		((year) % 4 == 0)
+#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
+/* 1-based month index into month_days[] below. */
+#define	days_in_month(a) 	(month_days[(a) - 1])
+
+/* Days per month for a non-leap year; to_tm() temporarily patches
+ * February to 29 for leap years. */
+static int month_days[12] = {
+	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+};
+
+/*
+ * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
+ */
+/* Compute the day of week (tm->tm_wday, 0 = Sunday by day%7 of the
+ * absolute day count) from tm_year/tm_mon/tm_mday, using the full
+ * Gregorian leap rule for the current year. */
+void GregorianDay(struct rtc_time * tm)
+{
+	int leapsToDate;
+	int lastYear;
+	int day;
+	/* Cumulative days before each month in a non-leap year. */
+	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
+
+	lastYear=tm->tm_year-1;
+
+	/*
+	 * Number of leap corrections to apply up to end of last year
+	 */
+	leapsToDate = lastYear/4 - lastYear/100 + lastYear/400;
+
+	/*
+	 * This year is a leap year if it is divisible by 4 except when it is
+	 * divisible by 100 unless it is divisible by 400
+	 *
+	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be
+	 */
+	if((tm->tm_year%4==0) &&
+	   ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
+	   (tm->tm_mon>2))
+	{
+		/*
+		 * We are past Feb. 29 in a leap year
+		 */
+		day=1;
+	}
+	else
+	{
+		day=0;
+	}
+
+	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
+	       tm->tm_mday;
+
+	tm->tm_wday=day%7;
+}
+
+/* Convert seconds-since-1970 (tim) to a broken-down rtc_time.
+ * NOTE(review): temporarily writes 29 into the static month_days table
+ * for leap-year February and restores it afterwards — not reentrant;
+ * callers here invoke it under xtime_lock. */
+void to_tm(int tim, struct rtc_time * tm)
+{
+	register int    i;
+	register long   hms, day;
+
+	day = tim / SECDAY;
+	hms = tim % SECDAY;
+
+	/* Hours, minutes, seconds are easy */
+	tm->tm_hour = hms / 3600;
+	tm->tm_min = (hms % 3600) / 60;
+	tm->tm_sec = (hms % 3600) % 60;
+
+	/* Number of years in days */
+	for (i = STARTOFTIME; day >= days_in_year(i); i++)
+		day -= days_in_year(i);
+	tm->tm_year = i;
+
+	/* Number of months in days left */
+	if (leapyear(tm->tm_year))
+		days_in_month(FEBRUARY) = 29;
+	for (i = 1; day >= days_in_month(i); i++)
+		day -= days_in_month(i);
+	days_in_month(FEBRUARY) = 28;
+	tm->tm_mon = i;
+
+	/* Days are what is left over (+1) from all that. */
+	tm->tm_mday = day + 1;
+
+	/*
+	 * Determine the day of week
+	 */
+	GregorianDay(tm);
+}
+
+/* Auxiliary function to compute scaling factors */
+/* Actually the choice of a timebase running at 1/4 the of the bus
+ * frequency giving resolution of a few tens of nanoseconds is quite nice.
+ * It makes this computation very precise (27-28 bits typically) which
+ * is optimistic considering the stability of most processor clock
+ * oscillators and the precision with which the timebase frequency
+ * is measured but does not harm.
+ */
+unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
+	unsigned mlt=0, tmp, err;
+	/* No concern for performance, it's done once: use a stupid
+	 * but safe and compact method to find the multiplier.
+	 */
+
+	/* Build the multiplier one bit at a time, from the top bit down,
+	 * keeping mulhwu(inscale, mlt) below outscale throughout. */
+	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
+		if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp;
+	}
+
+	/* We might still be off by 1 for the best approximation.
+	 * A side effect of this is that if outscale is too large
+	 * the returned value will be zero.
+	 * Many corner cases have been checked and seem to work,
+	 * some might have been forgotten in the test however.
+	 */
+
+	/* err is the low 32 bits of inscale*(mlt+1); round mlt up when the
+	 * residual is at most inscale/2. */
+	err = inscale*(mlt+1);
+	if (err <= inscale/2) mlt++;
+	return mlt;
+  }
+
+/*
+ * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
+ * result.
+ */
+
+/*
+ * Long division of the 128-bit value dividend_high:dividend_low by the
+ * 32-bit divisor, leaving the 128-bit quotient in *dr.  Works on four
+ * 32-bit limbs a:b:c:d; w,x,y,z are the quotient limbs and ra,rb,rc the
+ * running remainders, pre-shifted into position for the next limb.
+ */
+void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
+		   unsigned divisor, struct div_result *dr )
+{
+	unsigned long a,b,c,d, w,x,y,z, ra,rb,rc;
+
+	a = dividend_high >> 32;
+	b = dividend_high & 0xffffffff;
+	c = dividend_low >> 32;
+	d = dividend_low & 0xffffffff;
+
+	w = a/divisor;
+	ra = (a - (w * divisor)) << 32;
+
+	x = (ra + b)/divisor;
+	rb = ((ra + b) - (x * divisor)) << 32;
+
+	y = (rb + c)/divisor;
+	/* Bug fix: the remainder must be taken from (rb + c) — the limb
+	 * just divided — not (rb + b). */
+	rc = ((rb + c) - (y * divisor)) << 32;
+
+	z = (rc + d)/divisor;
+
+	dr->result_high = (w << 32) + x;
+	dr->result_low  = (y << 32) + z;
+
+}
+
--- /dev/null
+/*
+ * linux/arch/ppc/kernel/traps.c
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras (paulus@cs.anu.edu.au)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of hardware exceptions
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/ppcdebug.h>
+
+extern int fix_alignment(struct pt_regs *);
+extern void bad_page_fault(struct pt_regs *, unsigned long);
+
+#ifdef CONFIG_XMON
+extern void xmon(struct pt_regs *regs);
+extern int xmon_bpt(struct pt_regs *regs);
+extern int xmon_sstep(struct pt_regs *regs);
+extern int xmon_iabr_match(struct pt_regs *regs);
+extern int xmon_dabr_match(struct pt_regs *regs);
+extern void (*xmon_fault_handler)(struct pt_regs *regs);
+#endif
+
+#ifdef CONFIG_XMON
+void (*debugger)(struct pt_regs *regs) = xmon;
+int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
+int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
+int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
+int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
+void (*debugger_fault_handler)(struct pt_regs *regs);
+#else
+#ifdef CONFIG_KGDB
+void (*debugger)(struct pt_regs *regs);
+int (*debugger_bpt)(struct pt_regs *regs);
+int (*debugger_sstep)(struct pt_regs *regs);
+int (*debugger_iabr_match)(struct pt_regs *regs);
+int (*debugger_dabr_match)(struct pt_regs *regs);
+void (*debugger_fault_handler)(struct pt_regs *regs);
+#endif
+#endif
+/*
+ * Trap & Exception support
+ */
+
+/*
+ * Deliver signal signr for the exception described by regs.  A fault in
+ * kernel mode is fatal: dump state, enter the debugger if configured,
+ * then panic (panic() does not return, so force_sig() below runs only
+ * for user-mode faults).  With CONFIG_PPCDBG and a debugger, a SIGILL
+ * can optionally drop into the debugger before delivery.
+ */
+void
+_exception(int signr, struct pt_regs *regs)
+{
+	if (!user_mode(regs))
+	{
+		show_regs(regs);
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+		debugger(regs);
+#endif
+		print_backtrace((unsigned long *)regs->gpr[1]);
+		panic("Exception in kernel pc %lx signal %d",regs->nip,signr);
+#if defined(CONFIG_PPCDBG) && (defined(CONFIG_XMON) || defined(CONFIG_KGDB))
+	/* Allow us to catch SIGILLs for 64-bit app/glibc debugging. -Peter */
+	} else if (signr == SIGILL) {
+		ifppcdebug(PPCDBG_SIGNALXMON)
+			debugger(regs);
+#endif
+	}
+	force_sig(signr, current);
+}
+
+/* System reset interrupt: announce it on both the debug and normal
+ * consoles, then either drop into xmon or spin forever. */
+void
+SystemResetException(struct pt_regs *regs)
+{
+	udbg_printf("System Reset in kernel mode.\n");
+	printk("System Reset in kernel mode.\n");
+#if defined(CONFIG_XMON)
+	xmon(regs);
+#else
+	for(;;);
+#endif
+}
+
+/* Machine check: in kernel mode, give a registered debugger fault
+ * handler first refusal, otherwise dump state and panic.  In user mode,
+ * the offending task gets SIGSEGV. */
+void
+MachineCheckException(struct pt_regs *regs)
+{
+	if ( !user_mode(regs) )
+	{
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+		if (debugger_fault_handler) {
+			debugger_fault_handler(regs);
+			return;
+		}
+#endif
+		printk("Machine check in kernel mode.\n");
+		printk("Caused by (from SRR1=%lx): ", regs->msr);
+		show_regs(regs);
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+		debugger(regs);
+#endif
+		print_backtrace((unsigned long *)regs->gpr[1]);
+		panic("machine check");
+	}
+	/* Only reached for user-mode faults (the kernel path panics). */
+	_exception(SIGSEGV, regs);
+}
+
+/* System management interrupt handler.
+ * NOTE(review): with XMON/KGDB configured the bare braced block below is
+ * entered unconditionally and returns, making the show_regs/panic path
+ * unreachable — possibly a missing "if (debugger)" guard; confirm. */
+void
+SMIException(struct pt_regs *regs)
+{
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+	{
+		debugger(regs);
+		return;
+	}
+#endif
+	show_regs(regs);
+	print_backtrace((unsigned long *)regs->gpr[1]);
+	panic("System Management Interrupt");
+}
+
+/* Catch-all for exception vectors with no dedicated handler: log the
+ * vector and deliver SIGTRAP via _exception(). */
+void
+UnknownException(struct pt_regs *regs)
+{
+	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+	       regs->nip, regs->msr, regs->trap);
+	_exception(SIGTRAP, regs);
+}
+
+/* IABR (instruction address breakpoint) hit: let the debugger claim it,
+ * otherwise raise SIGTRAP. */
+void
+InstructionBreakpointException(struct pt_regs *regs)
+{
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+	if (debugger_iabr_match(regs))
+		return;
+#endif
+	_exception(SIGTRAP, regs);
+}
+
+/* Program check: dispatch on the SRR1 reason bits saved in regs->msr —
+ * FP-enabled exception -> SIGFPE, trap instruction -> debugger/SIGTRAP,
+ * anything else (illegal instruction) -> SIGILL. */
+void
+ProgramCheckException(struct pt_regs *regs)
+{
+	if (regs->msr & 0x100000) {
+		/* IEEE FP exception */
+		_exception(SIGFPE, regs);
+	} else if (regs->msr & 0x20000) {
+		/* trap exception */
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+		if (debugger_bpt(regs))
+			return;
+#endif
+		_exception(SIGTRAP, regs);
+	} else {
+		_exception(SIGILL, regs);
+	}
+}
+
+/* Trace/single-step interrupt: clear MSR_SE so we don't re-trap, offer
+ * the event to the debugger, else deliver SIGTRAP. */
+void
+SingleStepException(struct pt_regs *regs)
+{
+	regs->msr &= ~MSR_SE;  /* Turn off 'trace' bit */
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+	if (debugger_sstep(regs))
+		return;
+#endif
+	_exception(SIGTRAP, regs);
+}
+
+/* Dummy handler for Performance Monitor */
+
+void
+PerformanceMonitorException(struct pt_regs *regs)
+{
+	/* No PMU support yet: treat the interrupt as a trap signal. */
+	_exception(SIGTRAP, regs);
+}
+
+/* Alignment interrupt: try to emulate the unaligned access.  On success
+ * skip the instruction; on a bad operand address fault the task (or the
+ * kernel, via bad_page_fault); otherwise raise SIGBUS. */
+void
+AlignmentException(struct pt_regs *regs)
+{
+	int fixed;
+
+	fixed = fix_alignment(regs);
+	if (fixed == 1) {
+		ifppcdebug(PPCDBG_ALIGNFIXUP)
+			if (!user_mode(regs))
+				PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n", regs->nip);
+		regs->nip += 4;	/* skip over emulated instruction */
+		return;
+	}
+	if (fixed == -EFAULT) {
+		/* fixed == -EFAULT means the operand address was bad */
+		if (user_mode(regs))
+			force_sig(SIGSEGV, current);
+		else
+			bad_page_fault(regs, regs->dar);
+		return;
+	}
+	_exception(SIGBUS, regs);
+}
+
+/* Nothing to set up at boot on ppc64: the exception vectors are wired
+ * in head.S; this stub satisfies the generic init path. */
+void __init trap_init(void)
+{
+}
--- /dev/null
+/*
+ * NS16550 Serial Port (uart) debugging stuff.
+ *
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * NOTE: I am trying to make this code avoid any static data references to
+ * simplify debugging early boot. We'll see how that goes...
+ *
+ * To use this call udbg_init() first. It will init the uart to 9600 8N1.
+ * You may need to update the COM1 define if your uart is at a different addr.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stdarg.h>
+#define WANT_PPCDBG_TAB /* Only defined here */
+#include <asm/ppcdebug.h>
+#include <asm/processor.h>
+#include <asm/Naca.h>
+#include <asm/uaccess.h>
+#include <asm/machdep.h>
+
+extern struct Naca *naca;
+extern int _machine;
+
+/*
+ * Register layout of an NS16550-family UART, one byte per offset.
+ * Several offsets are dual-purpose and aliased by the #defines below
+ * (thr/rbr, iir/fcr); dll/dlm overlay offsets 0/1 when the divisor
+ * latch is selected via lcr (see udbg_init_uart, which writes lcr=0x80
+ * before programming the baud divisor).
+ */
+struct NS16550 {
+	/* this struct must be packed */
+	unsigned char rbr;  /* 0 */
+	unsigned char ier;  /* 1 */
+	unsigned char fcr;  /* 2 */
+	unsigned char lcr;  /* 3 */
+	unsigned char mcr;  /* 4 */
+	unsigned char lsr;  /* 5 */
+	unsigned char msr;  /* 6 */
+	unsigned char scr;  /* 7 */
+};
+
+#define thr rbr
+#define iir fcr
+#define dll rbr
+#define dlm ier
+#define dlab lcr
+
+#define LSR_DR   0x01  /* Data ready */
+#define LSR_OE   0x02  /* Overrun */
+#define LSR_PE   0x04  /* Parity error */
+#define LSR_FE   0x08  /* Framing error */
+#define LSR_BI   0x10  /* Break */
+#define LSR_THRE 0x20  /* Xmit holding register empty */
+#define LSR_TEMT 0x40  /* Xmitter empty */
+#define LSR_ERR  0x80  /* Error */
+
+volatile struct NS16550 *udbg_comport;
+
+spinlock_t udbg_lock = SPIN_LOCK_UNLOCKED;
+
+/* Program the debug UART at comport for 9600 8N1 and record it in
+ * udbg_comport; a NULL comport leaves debug output disabled. */
+void
+udbg_init_uart(void *comport)
+{
+	if (comport) {
+		udbg_comport = (struct NS16550 *)comport;
+		udbg_comport->lcr = 0x00; eieio();
+		/* Toggle the interrupt enables, then leave them all off:
+		 * this driver only ever polls. */
+		udbg_comport->ier = 0xFF; eieio();
+		udbg_comport->ier = 0x00; eieio();
+		udbg_comport->lcr = 0x80; eieio();	/* Access baud rate */
+		udbg_comport->dll = 12;   eieio();	/* 1 = 115200,  2 = 57600, 3 = 38400, 12 = 9600 baud */
+		udbg_comport->dlm = 0;    eieio();	/* dll >> 8 which should be zero for fast rates; */
+		udbg_comport->lcr = 0x03; eieio();	/* 8 data, 1 stop, no parity */
+		udbg_comport->mcr = 0x03; eieio();	/* RTS/DTR */
+		udbg_comport->fcr = 0x07; eieio();	/* Clear & enable FIFOs */
+	}
+}
+
+/* Emit one character on the debug UART, busy-waiting for the transmit
+ * holding register; '\n' is expanded to CR-LF.  Without a UART, iSeries
+ * falls back to printk. */
+void
+udbg_putc(unsigned char c)
+{
+	if ( udbg_comport ) {
+		while ((udbg_comport->lsr & LSR_THRE) == 0)
+			/* wait for idle */;
+		udbg_comport->thr = c; eieio();
+		if (c == '\n') {
+			/* Also put a CR.  This is for convenience. */
+			while ((udbg_comport->lsr & LSR_THRE) == 0)
+				/* wait for idle */;
+			udbg_comport->thr = '\r'; eieio();
+		}
+	} else if ( _machine == _MACH_iSeries ) {
+		/* ToDo: switch this via ppc_md */
+		printk("%c", c);
+	}
+}
+
+int udbg_getc_poll(void)
+{
+	/* Non-blocking read: return the waiting byte if the UART has one
+	 * ready (LSR data-ready bit set), otherwise -1. */
+	volatile struct NS16550 *port = udbg_comport;
+
+	if (port && (port->lsr & LSR_DR) != 0)
+		return port->rbr;
+	return -1;
+}
+
+/* Blocking read of one byte from the debug UART; returns 0 when no
+ * UART has been configured. */
+unsigned char
+udbg_getc(void)
+{
+	if ( udbg_comport ) {
+		while ((udbg_comport->lsr & LSR_DR) == 0)
+			/* wait for char */;
+		return udbg_comport->rbr;
+	}
+	return 0;
+}
+
+/* Write a NUL-terminated string through the platform's udbg_putc hook;
+ * a NULL or empty string prints the literal "NULL".  Without a hook the
+ * string goes to printk (NOTE(review): a NULL s reaches printk("%s", s)
+ * unguarded on that path — confirm it is never called that way). */
+void
+udbg_puts(const char *s)
+{
+	if (ppc_md.udbg_putc) {
+		char c;
+
+		if (s && *s != '\0') {
+			while ((c = *s++) != '\0')
+				ppc_md.udbg_putc(c);
+		} else {
+			udbg_puts("NULL");
+		}
+	} else {
+		printk("%s", s);
+	}
+}
+
+/*
+ * Write at most n characters of the NUL-terminated string s to the
+ * debug console; a NULL or empty s prints "NULL".  Returns the number
+ * of characters actually written.
+ */
+int
+udbg_write(const char *s, int n)
+{
+	int remain = n;
+	char c;
+	if (!ppc_md.udbg_putc)
+		for (;;);	/* stop here for cpuctl */
+	if ( s && *s != '\0' ) {
+		/* Bug fix: test remain before consuming a character.  The
+		 * old condition post-decremented remain on the failing
+		 * check, so a string longer than n returned n + 1. */
+		while (remain > 0 && (c = *s++) != '\0') {
+			ppc_md.udbg_putc(c);
+			remain--;
+		}
+	} else
+		udbg_puts("NULL");
+	return n - remain;
+}
+
+/* Blocking read of exactly buflen bytes from the debug console into
+ * buf, discarding XON/XOFF flow-control bytes (0x11/0x13).  Returns the
+ * number of bytes stored (always buflen when it returns). */
+int
+udbg_read(char *buf, int buflen) {
+	char c, *p = buf;
+	int i;
+	if (!ppc_md.udbg_putc)
+		for (;;);	/* stop here for cpuctl */
+	for (i = 0; i < buflen; ++i) {
+		do {
+			c = ppc_md.udbg_getc();
+		} while (c == 0x11 || c == 0x13);
+		*p++ = c;
+	}
+	return i;
+}
+
+/*
+ * Print an unsigned long as a fixed-width (16-digit) lower-case hex
+ * string, most significant nibble first.
+ *
+ * Fix: buf is now plain char -- udbg_puts() takes const char *, and
+ * only ASCII digits are stored, so the old unsigned char buffer just
+ * caused a pointer-signedness mismatch at the udbg_puts() call.
+ */
+void
+udbg_puthex(unsigned long val)
+{
+ int i, nibbles = sizeof(val)*2;
+ char buf[sizeof(val)*2+1];
+ for (i = nibbles-1; i >= 0; i--) {
+ buf[i] = (val & 0xf) + '0';
+ if (buf[i] > '9')
+ buf[i] += ('a'-'0'-10);
+ val >>= 4;
+ }
+ buf[nibbles] = '\0';
+ udbg_puts(buf);
+}
+
+/*
+ * Print the optional label s followed by the current stack pointer
+ * (GPR1, read with inline asm) in hex.  Only active on pSeries.
+ */
+void
+udbg_printSP(const char *s)
+{
+ if (_machine == _MACH_pSeries) {
+ unsigned long sp;
+ asm("mr %0,1" : "=r" (sp) :);
+ if (s)
+ udbg_puts(s);
+ udbg_puthex(sp);
+ }
+}
+
+/*
+ * printf-style output to the debug console, serialised by udbg_lock
+ * with interrupts disabled.
+ *
+ * Fix: buf is now plain char to match the vsprintf() and udbg_puts()
+ * prototypes (the unsigned char buffer was a pointer-signedness
+ * mismatch).  NOTE(review): vsprintf into a fixed 256-byte buffer is
+ * unbounded -- formatted output longer than that overruns the stack;
+ * callers must keep messages short until a vsnprintf is available.
+ */
+void
+udbg_printf(const char *fmt, ...)
+{
+ unsigned long flags;
+ char buf[256];
+
+ va_list args;
+ va_start(args, fmt);
+
+ spin_lock_irqsave(&udbg_lock, flags);
+ vsprintf(buf, fmt, args);
+ udbg_puts(buf);
+ spin_unlock_irqrestore(&udbg_lock, flags);
+
+ va_end(args);
+}
+
+/* Special print used by PPCDBG() macro */
+/*
+ * Filtered debug print used by the PPCDBG() macro: emits only when
+ * one of the PPCDBG bits in 'flags' is enabled in naca->debug_switch.
+ * Output is prefixed with the first matching trace name and the
+ * current task's comm, padded to column 18 so traces line up.
+ *
+ * Fix: the local used for spin_lock_irqsave must not be named
+ * 'flags' -- that redeclared the parameter (a C compile error) and
+ * would have clobbered the debug-flag argument.  The buffer is plain
+ * char to match sprintf/vsprintf/udbg_puts prototypes.
+ */
+void
+udbg_ppcdbg(unsigned long flags, const char *fmt, ...)
+{
+ unsigned long irq_flags;
+ unsigned long active_debugs = flags & naca->debug_switch;
+
+ if ( active_debugs ) {
+ va_list ap;
+ char buf[256];
+ unsigned long i, len = 0;
+
+ spin_lock_irqsave(&udbg_lock, irq_flags);
+ for(i=0; i < PPCDBG_NUM_FLAGS ;i++) {
+ if (((1U << i) & active_debugs) &&
+ trace_names[i]) {
+ len += strlen(trace_names[i]);
+ udbg_puts(trace_names[i]);
+ break;
+ }
+ }
+ sprintf(buf, " [%s]: ", current->comm);
+ len += strlen(buf);
+ udbg_puts(buf);
+
+ while(len < 18) {
+ udbg_puts(" ");
+ len++;
+ }
+
+ va_start(ap, fmt);
+ vsprintf(buf, fmt, ap);
+ udbg_puts(buf);
+ spin_unlock_irqrestore(&udbg_lock, irq_flags);
+
+ va_end(ap);
+ }
+}
+
+/*
+ * Return the subset of 'flags' currently enabled in the global
+ * debug switch (nonzero means at least one requested flag is on).
+ */
+unsigned long
+udbg_ifdebug(unsigned long flags)
+{
+ return (flags & naca->debug_switch);
+}
--- /dev/null
+/*
+ * arch/ppc/kernel/xics.c
+ *
+ * Copyright 2000 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/smp.h>
+#include <asm/Naca.h>
+#include <asm/rtas.h>
+#include "i8259.h"
+#include "xics.h"
+#include <asm/ppcdebug.h>
+
+extern struct Naca *naca;
+
+void xics_enable_irq(u_int irq);
+void xics_disable_irq(u_int irq);
+void xics_mask_and_ack_irq(u_int irq);
+void xics_end_irq(u_int irq);
+void xics_set_affinity(unsigned int irq_nr, unsigned long cpumask);
+
+/*
+ * Primary XICS controller ops (positional initializer; the two NULLs
+ * are presumably the startup/shutdown slots of hw_interrupt_type --
+ * confirm against the struct declaration).
+ */
+struct hw_interrupt_type xics_pic = {
+ " XICS ",
+ NULL,
+ NULL,
+ xics_enable_irq,
+ xics_disable_irq,
+ xics_mask_and_ack_irq,
+ xics_end_irq,
+ xics_set_affinity
+};
+
+/*
+ * Ops for ISA interrupts cascaded through the 8259 behind XICS.
+ * Only the ack slot is filled statically; the enable/disable slots
+ * are copied from i8259_pic at runtime in xics_init_IRQ().  Trailing
+ * members are implicitly zero (one fewer initializer than xics_pic).
+ */
+struct hw_interrupt_type xics_8259_pic = {
+ " XICS/8259",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ xics_mask_and_ack_irq,
+ NULL
+};
+
+#define XICS_IPI 2 /* interrupt source number used for IPIs */
+#define XICS_IRQ_OFFSET 0x10 /* XICS virqs start above the 16 ISA irqs */
+#define XICS_IRQ_SPURIOUS 0
+
+/* Want a priority other than 0. Various HW issues require this. */
+#define DEFAULT_PRIORITY 5
+
+/*
+ * Per-cpu XICS presentation-layer register block (accessed via
+ * ioremap of the addresses found in the device tree).  The unions
+ * allow whole-word or single-byte access to each register.
+ */
+struct xics_ipl {
+ union {
+ u32 word;
+ u8 bytes[4];
+ } xirr_poll;
+ union {
+ u32 word;
+ u8 bytes[4];
+ } xirr;
+ u32 dummy; /* pad between xirr and qirr */
+ union {
+ u32 word;
+ u8 bytes[4];
+ } qirr;
+};
+
+struct xics_info {
+ volatile struct xics_ipl * per_cpu[NR_CPUS];
+};
+
+struct xics_info xics_info;
+
+unsigned long long intr_base = 0;
+int xics_irq_8259_cascade = 0; /* virq of the 8259 cascade, -1 if none */
+int xics_irq_8259_cascade_real = 0;
+unsigned int default_server = 0; /* boot cpu's interrupt server */
+unsigned int default_distrib_server = 0; /* global distribution server */
+
+/* RTAS service tokens */
+int ibm_get_xive;
+int ibm_set_xive;
+int ibm_int_off;
+
+/* (addr,size) pairs from the presentation node's "reg" property */
+struct xics_interrupt_node {
+ unsigned long long addr;
+ unsigned long long size;
+} inodes[NR_CPUS*2];
+
+/*
+ * Indirection for low-level register access: native MMIO (pSeries_ops
+ * below) vs. the hypervisor-call variant (pSeriesLP_ops, external).
+ */
+typedef struct {
+ int (*xirr_info_get)(int cpu);
+ void (*xirr_info_set)(int cpu, int val);
+ void (*cppr_info)(int cpu, u8 val);
+ void (*qirr_info)(int cpu, u8 val);
+} xics_ops;
+
+
+/* Native MMIO accessors for the per-cpu XICS registers. */
+
+static int pSeries_xirr_info_get(int n_cpu)
+{
+ return (xics_info.per_cpu[n_cpu]->xirr.word);
+}
+
+static void pSeries_xirr_info_set(int n_cpu, int value)
+{
+ xics_info.per_cpu[n_cpu]->xirr.word = value;
+}
+
+/* writes only byte 0 of the XIRR word (the priority/CPPR byte) */
+static void pSeries_cppr_info(int n_cpu, u8 value)
+{
+ xics_info.per_cpu[n_cpu]->xirr.bytes[0] = value;
+}
+
+static void pSeries_qirr_info(int n_cpu , u8 value)
+{
+ xics_info.per_cpu[n_cpu]->qirr.bytes[0] = value;
+}
+
+static xics_ops pSeries_ops = {
+ pSeries_xirr_info_get,
+ pSeries_xirr_info_set,
+ pSeries_cppr_info,
+ pSeries_qirr_info
+};
+
+/* switched to pSeriesLP_ops in xics_init_IRQ() when running on LPAR */
+static xics_ops *ops = &pSeries_ops;
+extern xics_ops pSeriesLP_ops;
+
+
+/*
+ * Enable a virtual irq by programming its XIVE (server, priority)
+ * via the ibm,set-xive RTAS call.  IPIs are never routed through
+ * RTAS.  With CONFIG_IRQ_ALL_CPUS interrupts are distributed to the
+ * global server once all cpus are up.
+ */
+void
+xics_enable_irq(
+ u_int virq
+ )
+{
+ u_int irq;
+ unsigned long status;
+ long call_status;
+
+ virq -= XICS_IRQ_OFFSET;
+ irq = virt_irq_to_real(virq);
+ if (irq == XICS_IPI)
+ return;
+#ifdef CONFIG_IRQ_ALL_CPUS
+ call_status = rtas_call(ibm_set_xive, 3, 1, (unsigned long*)&status,
+ irq, smp_threads_ready ? default_distrib_server : default_server, DEFAULT_PRIORITY);
+#else
+ call_status = rtas_call(ibm_set_xive, 3, 1, (unsigned long*)&status,
+ irq, default_server, DEFAULT_PRIORITY);
+#endif
+ if( call_status != 0 ) {
+ printk("xics_enable_irq: irq=%x: rtas_call failed; retn=%lx, status=%lx\n",
+ irq, call_status, status);
+ return;
+ }
+}
+
+/*
+ * Disable a virtual irq at the source via the ibm,int-off RTAS call.
+ */
+void
+xics_disable_irq(
+ u_int virq
+ )
+{
+ u_int irq;
+ unsigned long status;
+ long call_status;
+
+ virq -= XICS_IRQ_OFFSET;
+ irq = virt_irq_to_real(virq);
+ call_status = rtas_call(ibm_int_off, 1, 1, (unsigned long*)&status,
+ irq);
+ if( call_status != 0 ) {
+ printk("xics_disable_irq: irq=%x: rtas_call failed, retn=%lx\n",
+ irq, call_status);
+ return;
+ }
+}
+
+/*
+ * End-of-interrupt: restore the processor priority and write the
+ * real irq number back to the XIRR, signalling EOI to the XICS.
+ */
+void
+xics_end_irq(
+ u_int irq
+ )
+{
+ int cpu = smp_processor_id();
+
+ ops->cppr_info(cpu, 0); /* actually the value overwritten by ack */
+ iosync();
+ ops->xirr_info_set(cpu, ((0xff<<24) | (virt_irq_to_real(irq-XICS_IRQ_OFFSET))));
+ iosync();
+}
+
+/*
+ * Acknowledge an interrupt.  ISA irqs (below XICS_IRQ_OFFSET) are
+ * acked in the 8259 and the cascade source is EOI'd in the XICS;
+ * native XICS irqs just have the processor priority raised to 0xff
+ * (lowest) until xics_end_irq() runs.
+ */
+void
+xics_mask_and_ack_irq(
+ u_int irq
+ )
+{
+ int cpu = smp_processor_id();
+
+ if( irq < XICS_IRQ_OFFSET ) {
+ i8259_pic.ack(irq);
+ iosync();
+ ops->xirr_info_set(cpu, ((0xff<<24) | xics_irq_8259_cascade_real));
+ iosync();
+ }
+ else {
+ ops->cppr_info(cpu, 0xff);
+ iosync();
+ }
+}
+
+/*
+ * Fetch the pending interrupt from the XIRR and translate it to a
+ * virtual irq number.  Cascaded 8259 sources are resolved through
+ * i8259_irq(); spurious interrupts return -1.
+ */
+int
+xics_get_irq(struct pt_regs *regs)
+{
+ u_int cpu = smp_processor_id();
+ u_int vec;
+ int irq;
+
+ vec = ops->xirr_info_get(cpu);
+ /* (vec >> 24) == old priority */
+ vec &= 0x00ffffff;
+ /* for sanity, this had better be < NR_IRQS - 16 */
+ if( vec == xics_irq_8259_cascade_real ) {
+ irq = i8259_irq(cpu);
+ if(irq == -1) {
+ /* Spurious cascaded interrupt. Still must ack xics */
+ xics_end_irq(XICS_IRQ_OFFSET + xics_irq_8259_cascade);
+ irq = -1; /* redundant (i8259_irq already returned -1), kept for clarity */
+ }
+ } else if( vec == XICS_IRQ_SPURIOUS ) {
+ irq = -1;
+ printk("spurious PPC interrupt!\n");
+ } else
+ irq = real_irq_to_virt(vec) + XICS_IRQ_OFFSET;
+ return irq;
+}
+
+
+#ifdef CONFIG_SMP
+/*
+ * IPI handler: clear this cpu's qirr, then drain every message bit
+ * posted in xics_ipi_message[cpu], dispatching each through
+ * smp_message_recv().  Loops until no bits remain so messages posted
+ * while handling are not lost.
+ */
+void xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+{
+ extern volatile unsigned long xics_ipi_message[];
+ int cpu = smp_processor_id();
+
+ ops->qirr_info(cpu, 0xff);
+ while (xics_ipi_message[cpu]) {
+ if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, &xics_ipi_message[cpu])) {
+ mb();
+ smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
+ }
+ if (test_and_clear_bit(PPC_MSG_RESCHEDULE, &xics_ipi_message[cpu])) {
+ mb();
+ smp_message_recv(PPC_MSG_RESCHEDULE, regs);
+ }
+ if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, &xics_ipi_message[cpu])) {
+ mb();
+ smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
+ }
+ }
+}
+
+/* Raise an IPI on the target cpu by writing 0 (highest priority)
+ * to its qirr. */
+void xics_cause_IPI(int cpu)
+{
+ ops->qirr_info(cpu,0) ;
+}
+
+/* Per-cpu XICS bringup: open this cpu for interrupts by setting its
+ * processor priority to 0xff (least favored). */
+void xics_setup_cpu(void)
+{
+ int cpu = smp_processor_id();
+
+ ops->cppr_info(cpu, 0xff);
+ iosync();
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Boot-time XICS initialisation:
+ *  - look up the RTAS tokens used by enable/disable/set_affinity;
+ *  - walk the interrupt-presentation nodes, collecting each cpu's
+ *    presentation register (addr,size) into inodes[];
+ *  - record the boot cpu's interrupt server numbers;
+ *  - map the ISA 8259 cascade source, if an ISA controller exists;
+ *  - ioremap the per-cpu register blocks (native pSeries) or switch
+ *    to the hypervisor-call ops (pSeriesLP);
+ *  - install handlers (8259 ops for irqs 0-15, XICS above) and hook
+ *    the cascade and IPI irqs.
+ * Fatal device-tree problems hang in a while(1) after logging.
+ */
+void
+xics_init_IRQ( void )
+{
+ int i;
+ unsigned long intr_size = 0;
+ struct device_node *np;
+ uint *ireg, ilen, indx=0;
+
+ ibm_get_xive = rtas_token("ibm,get-xive");
+ ibm_set_xive = rtas_token("ibm,set-xive");
+ ibm_int_off = rtas_token("ibm,int-off");
+
+ np = find_type_devices("PowerPC-External-Interrupt-Presentation");
+ if (!np) {
+ printk(KERN_WARNING "Can't find Interrupt Presentation\n");
+ udbg_printf("Can't find Interrupt Presentation\n");
+ while (1);
+ }
+nextnode:
+ ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
+ if (ireg) {
+ /*
+ * set node starting index for this node
+ */
+ indx = *ireg;
+ }
+
+ ireg = (uint *)get_property(np, "reg", &ilen);
+ if (!ireg) {
+ printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
+ udbg_printf("Can't find Interrupt Reg Property\n");
+ while (1);
+ }
+
+ /* "reg" is a list of (addr,size) pairs of 32-bit cells */
+ while (ilen) {
+ inodes[indx].addr = (unsigned long long)*ireg++ << 32;
+ ilen -= sizeof(uint);
+ inodes[indx].addr |= *ireg++;
+ ilen -= sizeof(uint);
+ inodes[indx].size = (unsigned long long)*ireg++ << 32;
+ ilen -= sizeof(uint);
+ inodes[indx].size |= *ireg++;
+ ilen -= sizeof(uint);
+ indx++;
+ if (indx >= NR_CPUS) break;
+ }
+
+ np = np->next;
+ if ((indx < NR_CPUS) && np) goto nextnode;
+
+ /* Find the server numbers for the boot cpu. */
+ for (np = find_type_devices("cpu"); np; np = np->next) {
+ ireg = (uint *)get_property(np, "reg", &ilen);
+ if (ireg && ireg[0] == hard_smp_processor_id()) {
+ ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+ i = ilen / sizeof(int);
+ if (ireg && i > 0) {
+ default_server = ireg[0];
+ default_distrib_server = ireg[i-1]; /* take last element */
+ }
+ break;
+ }
+ }
+
+ intr_base = inodes[0].addr;
+ intr_size = (ulong)inodes[0].size;
+
+ np = find_type_devices("interrupt-controller");
+ if (!np) {
+ printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
+ xics_irq_8259_cascade = -1;
+ } else {
+ ireg = (uint *) get_property(np, "interrupts", 0);
+ if (!ireg) {
+ printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
+ udbg_printf("Can't find ISA Interrupts Property\n");
+ while (1);
+ }
+ xics_irq_8259_cascade_real = *ireg;
+ xics_irq_8259_cascade = virt_irq_create_mapping(xics_irq_8259_cascade_real);
+ }
+
+ if (_machine == _MACH_pSeries) {
+#ifdef CONFIG_SMP
+ for (i = 0; i < naca->processorCount; ++i) {
+ xics_info.per_cpu[i] =
+ __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
+ (ulong)inodes[get_hard_smp_processor_id(i)].size, _PAGE_NO_CACHE);
+ }
+#else
+ xics_info.per_cpu[0] = __ioremap((ulong)intr_base, intr_size, _PAGE_NO_CACHE);
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_PSERIES
+ /* actually iSeries does not use any of xics...but it has link dependencies
+ * for now, except this new one...
+ */
+ } else if (_machine == _MACH_pSeriesLP) {
+ ops = &pSeriesLP_ops;
+#endif
+ }
+
+ /* complete xics_8259_pic with the real 8259 enable/disable hooks */
+ xics_8259_pic.enable = i8259_pic.enable;
+ xics_8259_pic.disable = i8259_pic.disable;
+ for (i = 0; i < 16; ++i)
+ irq_desc[i].handler = &xics_8259_pic;
+ for (; i < NR_IRQS; ++i)
+ irq_desc[i].handler = &xics_pic;
+
+ ops->cppr_info(0, 0xff);
+ iosync();
+ if (xics_irq_8259_cascade != -1) {
+ if (request_irq(xics_irq_8259_cascade + XICS_IRQ_OFFSET, no_action,
+ 0, "8259 cascade", 0))
+ printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
+ i8259_init();
+ }
+
+#ifdef CONFIG_SMP
+ real_irq_to_virt_map[XICS_IPI] = virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
+ request_irq(XICS_IPI + XICS_IRQ_OFFSET, xics_ipi_action, 0, "IPI", 0);
+ irq_desc[XICS_IPI+XICS_IRQ_OFFSET].status |= IRQ_PER_CPU;
+#endif
+}
+
+/*
+ * NOTE(review): the early return deliberately disables this routine;
+ * the 8259 cascade setup below it is dead code (the same work is
+ * already done at the end of xics_init_IRQ()).  Confirm whether the
+ * body can simply be removed.
+ */
+void xics_isa_init(void)
+{
+ return;
+ if (request_irq(xics_irq_8259_cascade + XICS_IRQ_OFFSET, no_action,
+ 0, "8259 cascade", 0))
+ printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
+ i8259_init();
+}
+
+/*
+ * Find first logical cpu and return its physical cpu number
+ */
+/*
+ * Return the hard (physical) cpu id of the lowest-numbered logical
+ * cpu set in cpumask; falls back to the global distribution server
+ * (with an error message) if no online cpu is set.
+ */
+static inline u32 physmask(u32 cpumask)
+{
+ int i;
+
+ for (i = 0; i < smp_num_cpus; ++i, cpumask >>= 1) {
+ if (cpumask & 1)
+ return get_hard_smp_processor_id(i);
+ }
+
+ printk(KERN_ERR "xics_set_affinity: invalid irq mask\n");
+
+ return default_distrib_server;
+}
+
+/*
+ * Re-target an irq: read the current XIVE (to keep its priority),
+ * then rewrite it with the new server.  Only "all cpus"
+ * (mask 0xffffffff -> distribution server) or a single cpu are
+ * supported.  IPIs are never moved.
+ */
+void xics_set_affinity(unsigned int virq, unsigned long cpumask)
+{
+ irq_desc_t *desc = irq_desc + virq;
+ unsigned int irq;
+ unsigned long flags;
+ long status;
+ unsigned long xics_status[2]; /* [0]=server, [1]=priority from ibm,get-xive */
+ u32 newmask;
+
+ virq -= XICS_IRQ_OFFSET;
+ irq = virt_irq_to_real(virq);
+ if (irq == XICS_IPI)
+ return;
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status, irq);
+
+ if (status) {
+ printk("xics_set_affinity: irq=%d ibm,get-xive returns %ld\n",
+ irq, status);
+ goto out;
+ }
+
+ /* For the moment only implement delivery to all cpus or one cpu */
+ if (cpumask == 0xffffffff)
+ newmask = default_distrib_server;
+ else
+ newmask = physmask(cpumask);
+
+ status = rtas_call(ibm_set_xive, 3, 1, NULL,
+ irq, newmask, xics_status[1]);
+
+ if (status) {
+ printk("xics_set_affinity irq=%d ibm,set-xive returns %ld\n",
+ irq, status);
+ goto out;
+ }
+
+out:
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
--- /dev/null
+/*
+ * arch/ppc/kernel/xics.h
+ *
+ * Copyright 2000 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC_KERNEL_XICS_H
+#define _PPC_KERNEL_XICS_H
+
+#include "local_irq.h"
+
+extern struct hw_interrupt_type xics_pic;
+extern struct hw_interrupt_type xics_8259_pic;
+
+void xics_init_IRQ(void);
+int xics_get_irq(struct pt_regs *);
+void xics_isa_init(void);
+
+#endif /* _PPC_KERNEL_XICS_H */
--- /dev/null
+#
+# Makefile for ppc64-specific library files..
+#
+
+# Let Rules.make assemble the .S files here with the standard rule.
+USE_STANDARD_AS_RULE := true
+
+O_TARGET = lib.o
+
+obj-y := checksum.o dec_and_lock.o string.o strcase.o
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/*
+ * This file contains assembly-language implementations
+ * of IP-style 1's complement checksum routines.
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+#include <linux/sys.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include "../kernel/ppc_asm.tmpl"
+
+ .text
+
+/*
+ * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
+ * len is in words and is always >= 5.
+ *
+ * In practice len == 5, but this is not guaranteed. So this code does not
+ * attempt to use doubleword instructions.
+ */
+_GLOBAL(ip_fast_csum)
+ lwz r0,0(r3) /* sum first two words, starting the carry chain */
+ lwzu r5,4(r3)
+ addic. r4,r4,-2 /* two words consumed */
+ addc r0,r0,r5
+ mtctr r4
+ blelr- /* len <= 2 words: done (not expected; len >= 5) */
+1: lwzu r4,4(r3) /* add remaining words with carry */
+ adde r0,r0,r4
+ bdnz 1b
+ addze r0,r0 /* add in final carry */
+ rldicl r4,r0,32,0 /* fold two 32-bit halves together */
+ add r0,r0,r4
+ srdi r0,r0,32
+ rlwinm r3,r0,16,0,31 /* fold two halves together */
+ add r3,r0,r3
+ not r3,r3 /* one's complement */
+ srwi r3,r3,16 /* result in low 16 bits */
+ blr
+
+/*
+ * Compute checksum of TCP or UDP pseudo-header:
+ * csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
+ * No real gain trying to do this specially for 64 bit, but
+ * the 32 bit addition may spill into the upper bits of
+ * the doubleword so we still must fold it down from 64.
+ */
+_GLOBAL(csum_tcpudp_magic)
+ rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
+ addc r0,r3,r4 /* add 4 32-bit words together */
+ adde r0,r0,r5
+ adde r0,r0,r7
+ rldicl r4,r0,32,0 /* fold 64 bit value */
+ add r0,r4,r0
+ srdi r0,r0,32
+ rlwinm r3,r0,16,0,31 /* fold two halves together */
+ add r3,r0,r3
+ not r3,r3 /* one's complement, result in low 16 bits */
+ srwi r3,r3,16
+ blr
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit).
+ *
+ * This code assumes at least halfword alignment, though the length
+ * can be any number of bytes. The sum is accumulated in r5.
+ *
+ * csum_partial(r3=buff, r4=len, r5=sum)
+ */
+/* Sums doublewords after word-aligning, then mops up the 4/2/1-byte
+ * tails; the running carry is folded in at the end. */
+_GLOBAL(csum_partial)
+ subi r3,r3,8 /* we'll offset by 8 for the loads */
+ srdi. r6,r4,3 /* divide by 8 for doubleword count */
+ addic r5,r5,0 /* clear carry */
+ beq 3f /* if we're doing < 8 bytes */
+ andi. r0,r3,2 /* aligned on a word boundary already? */
+ beq+ 1f
+ lhz r6,8(r3) /* do 2 bytes to get aligned */
+ addi r3,r3,2
+ subi r4,r4,2
+ addc r5,r5,r6
+ srdi. r6,r4,3 /* recompute number of doublewords */
+ beq 3f /* any left? */
+1: mtctr r6
+2: ldu r6,8(r3) /* main sum loop */
+ adde r5,r5,r6
+ bdnz 2b
+ andi. r4,r4,7 /* compute bytes left to sum after doublewords */
+3: cmpi 0,r4,4 /* is at least a full word left? */
+ blt 4f
+ lwz r6,8(r3) /* sum this word */
+ addi r3,r3,4
+ subi r4,r4,4
+ adde r5,r5,r6
+4: cmpi 0,r4,2 /* is at least a halfword left? */
+ blt+ 5f
+ lhz r6,8(r3) /* sum this halfword */
+ addi r3,r3,2
+ subi r4,r4,2
+ adde r5,r5,r6
+5: cmpi 0,r4,1 /* is at least a byte left? */
+ bne+ 6f
+ lbz r6,8(r3) /* sum this byte */
+ slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */
+ adde r5,r5,r6
+6: addze r5,r5 /* add in final carry */
+ rldicl r4,r5,32,0 /* fold two 32-bit halves together */
+ add r3,r4,r5
+ srdi r3,r3,32
+ blr
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively, and (for an error on
+ * src) zeroes the rest of dst.
+ *
+ * This code needs to be reworked to take advantage of 64 bit sum+copy.
+ * However, due to tokenring halfword alignment problems this will be very
+ * tricky. For now we'll leave it until we instrument it somehow.
+ *
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ */
+/* Word-at-a-time copy+checksum.  Faulting loads (8x labels) branch to
+ * the src_error_* fixups below, which zero the remaining destination;
+ * faulting stores (9x labels) branch to dst_error.  The __ex_table at
+ * the end wires each access to its handler. */
+_GLOBAL(csum_partial_copy_generic)
+ addic r0,r6,0 /* seed sum, clear carry */
+ subi r3,r3,4
+ subi r4,r4,4
+ srwi. r6,r5,2
+ beq 3f /* if we're doing < 4 bytes */
+ andi. r9,r4,2 /* Align dst to longword boundary */
+ beq+ 1f
+81: lhz r6,4(r3) /* do 2 bytes to get aligned */
+ addi r3,r3,2
+ subi r5,r5,2
+91: sth r6,4(r4)
+ addi r4,r4,2
+ addc r0,r0,r6
+ srwi. r6,r5,2 /* # words to do */
+ beq 3f
+1: mtctr r6
+82: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */
+92: stwu r6,4(r4) /* be unnecessary to unroll this loop */
+ adde r0,r0,r6
+ bdnz 82b
+ andi. r5,r5,3
+3: cmpi 0,r5,2 /* halfword tail */
+ blt+ 4f
+83: lhz r6,4(r3)
+ addi r3,r3,2
+ subi r5,r5,2
+93: sth r6,4(r4)
+ addi r4,r4,2
+ adde r0,r0,r6
+4: cmpi 0,r5,1 /* byte tail */
+ bne+ 5f
+84: lbz r6,4(r3)
+94: stb r6,4(r4)
+ slwi r6,r6,8 /* Upper byte of word */
+ adde r0,r0,r6
+5: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */
+ rldicl r4,r3,32,0 /* fold 64 bit value */
+ add r3,r4,r3
+ srdi r3,r3,32
+ blr
+
+/* These shouldn't go in the fixup section, since that would
+ cause the ex_table addresses to get out of order. */
+
+/* src fault during alignment halfword: zero the rest of dst */
+ .globl src_error_1
+src_error_1:
+ li r6,0
+ subi r5,r5,2
+95: sth r6,4(r4)
+ addi r4,r4,2
+ srwi. r6,r5,2
+ beq 3f
+ mtctr r6
+/* src fault in the word loop: zero remaining whole words of dst */
+ .globl src_error_2
+src_error_2:
+ li r6,0
+96: stwu r6,4(r4)
+ bdnz 96b
+3: andi. r5,r5,3
+ beq src_error
+/* src fault in the tail: zero remaining bytes of dst */
+ .globl src_error_3
+src_error_3:
+ li r6,0
+ mtctr r5
+ addi r4,r4,3
+97: stbu r6,1(r4)
+ bdnz 97b
+/* report -EFAULT through *src_err (r7) if non-NULL */
+ .globl src_error
+src_error:
+ cmpi 0,r7,0
+ beq 1f
+ li r6,-EFAULT
+ stw r6,0(r7)
+1: addze r3,r0
+ blr
+
+/* report -EFAULT through *dst_err (r8) if non-NULL */
+ .globl dst_error
+dst_error:
+ cmpi 0,r8,0
+ beq 1f
+ li r6,-EFAULT
+ stw r6,0(r8)
+1: addze r3,r0
+ blr
+
+.section __ex_table,"a"
+ .align 3
+ .llong 81b,src_error_1
+ .llong 91b,dst_error
+ .llong 82b,src_error_2
+ .llong 92b,dst_error
+ .llong 83b,src_error_3
+ .llong 93b,dst_error
+ .llong 84b,src_error_3
+ .llong 94b,dst_error
+ .llong 95b,dst_error
+ .llong 96b,dst_error
+ .llong 97b,dst_error
--- /dev/null
+/*
+ * ppc64 version of atomic_dec_and_lock() using cmpxchg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+/*
+ * Decrement *atomic; return 1 with 'lock' held if the count reached
+ * zero, otherwise return 0 without the lock.
+ *
+ * Fast path: if the decrement cannot reach zero, do it with a lockless
+ * cmpxchg (retrying on contention -- cmpxchg returns the old value, so
+ * a mismatch with the snapshot means another cpu got in first).
+ * Slow path: a decrement that may hit zero takes the lock first, then
+ * re-checks with atomic_dec_and_test() so the 1/0 answer and the lock
+ * acquisition are atomic with respect to other callers.
+ */
+int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+ int counter;
+ int newcount;
+
+repeat:
+ counter = atomic_read(atomic);
+ newcount = counter-1;
+
+ if (!newcount)
+ goto slow_path;
+
+ newcount = cmpxchg(&atomic->counter, counter, newcount);
+
+ if (newcount != counter)
+ goto repeat;
+ return 0;
+
+slow_path:
+ spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock(lock);
+ return 0;
+}
--- /dev/null
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/ctype.h>
+
+/*
+ * Case-insensitive string comparison.  Returns <0, 0 or >0 as the
+ * lower-cased s1 sorts before, equal to or after the lower-cased s2.
+ */
+int strcasecmp(const char *s1, const char *s2)
+{
+ int a, b;
+
+ for (;;) {
+ a = tolower(*s1++);
+ b = tolower(*s2++);
+ if (a != b || a == 0)
+ return a - b;
+ }
+}
+
+/*
+ * Case-insensitive comparison of at most n characters.
+ *
+ * Fix: with n == 0 the do-while still ran once and compared the first
+ * characters; POSIX requires strncasecmp(s1, s2, 0) to return 0, so
+ * bail out before the loop for n <= 0.
+ */
+int strncasecmp(const char *s1, const char *s2, int n)
+{
+ int c1, c2;
+
+ if (n <= 0)
+ return 0;
+ do {
+ c1 = tolower(*s1++);
+ c2 = tolower(*s2++);
+ } while ((--n > 0) && c1 == c2 && c1 != 0);
+ return c1 - c2;
+}
--- /dev/null
+/*
+ * String handling functions for PowerPC.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "../kernel/ppc_asm.tmpl"
+#include <asm/processor.h>
+#include <asm/errno.h>
+
+#define CACHE_LINE_SIZE 128
+#define LG_CACHE_LINE_SIZE 7
+#define MAX_COPY_PREFETCH 1
+
+/* Copy 16 bytes (4 words) via r7-r10; r4/r6 are pre-decremented
+ * src/dst cursors updated by the trailing lwzu/stwu. */
+#define COPY_16_BYTES \
+ lwz r7,4(r4); \
+ lwz r8,8(r4); \
+ lwz r9,12(r4); \
+ lwzu r10,16(r4); \
+ stw r7,4(r6); \
+ stw r8,8(r6); \
+ stw r9,12(r6); \
+ stwu r10,16(r6)
+
+/* Same copy, but each access gets a numbered label (8n0..8n7) so the
+ * matching COPY_16_BYTES_EXCODE(n) can register exception fixups. */
+#define COPY_16_BYTES_WITHEX(n) \
+8 ## n ## 0: \
+ lwz r7,4(r4); \
+8 ## n ## 1: \
+ lwz r8,8(r4); \
+8 ## n ## 2: \
+ lwz r9,12(r4); \
+8 ## n ## 3: \
+ lwzu r10,16(r4); \
+8 ## n ## 4: \
+ stw r7,4(r6); \
+8 ## n ## 5: \
+ stw r8,8(r6); \
+8 ## n ## 6: \
+ stw r9,12(r6); \
+8 ## n ## 7: \
+ stwu r10,16(r6)
+
+/* Fixup stubs for COPY_16_BYTES_WITHEX(n): adjust the remaining byte
+ * count by the 16*n bytes already done in this cacheline, then jump
+ * to the read-fault (104f) or write-fault (105f) handler. */
+#define COPY_16_BYTES_EXCODE(n) \
+9 ## n ## 0: \
+ addi r5,r5,-(16 * n); \
+ b 104f; \
+9 ## n ## 1: \
+ addi r5,r5,-(16 * n); \
+ b 105f; \
+.section __ex_table,"a"; \
+ .align 3; \
+ .llong 8 ## n ## 0b,9 ## n ## 0b; \
+ .llong 8 ## n ## 1b,9 ## n ## 0b; \
+ .llong 8 ## n ## 2b,9 ## n ## 0b; \
+ .llong 8 ## n ## 3b,9 ## n ## 0b; \
+ .llong 8 ## n ## 4b,9 ## n ## 1b; \
+ .llong 8 ## n ## 5b,9 ## n ## 1b; \
+ .llong 8 ## n ## 6b,9 ## n ## 1b; \
+ .llong 8 ## n ## 7b,9 ## n ## 1b; \
+.text
+
+CACHELINE_BYTES = CACHE_LINE_SIZE
+LG_CACHELINE_BYTES = LG_CACHE_LINE_SIZE
+CACHELINE_MASK = (CACHE_LINE_SIZE-1)
+
+/* strcpy(dst r3, src r4): byte copy through the terminating NUL;
+ * returns dst in r3. */
+_GLOBAL(strcpy)
+ addi r5,r3,-1
+ addi r4,r4,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r5)
+ bne 1b
+ blr
+
+/* strncpy(dst r3, src r4, n r5): copy up to n bytes, stopping after
+ * a NUL is copied; returns dst in r3. */
+_GLOBAL(strncpy)
+ cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+ addi r6,r3,-1
+ addi r4,r4,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r6)
+ bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
+ blr
+
+/* strcat(dst r3, src r4): find dst's NUL, then strcpy src there. */
+_GLOBAL(strcat)
+ addi r5,r3,-1
+ addi r4,r4,-1
+1: lbzu r0,1(r5)
+ cmpwi 0,r0,0
+ bne 1b
+ addi r5,r5,-1 /* back up onto the NUL so it gets overwritten */
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r5)
+ bne 1b
+ blr
+
+/* strcmp(s1 r3, s2 r4): returns byte difference at first mismatch,
+ * or 0 if equal through the NUL. */
+_GLOBAL(strcmp)
+ addi r5,r3,-1
+ addi r4,r4,-1
+1: lbzu r3,1(r5)
+ cmpwi 1,r3,0 /* cr1: hit end of s1? */
+ lbzu r0,1(r4)
+ subf. r3,r0,r3
+ beqlr 1 /* s1 ended: return difference (0 if s2 ended too) */
+ beq 1b /* bytes equal: keep going */
+ blr
+
+/* strlen(s r3): count bytes before the NUL. */
+_GLOBAL(strlen)
+ addi r4,r3,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ bne 1b
+ subf r3,r3,r4 /* NUL address minus start = length */
+ blr
+
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero. This requires that the destination
+ * area is cacheable. -- paulus
+ */
+/* Zero r4 bytes at r3 (dest must be cacheable): word stores up to
+ * cacheline alignment, dcbz per whole cacheline, then word/byte tail. */
+_GLOBAL(cacheable_memzero)
+ mr r5,r4
+ li r4,0
+ addi r6,r3,-4
+ cmplwi 0,r5,4
+ blt 7f /* < 4 bytes: byte loop only */
+ stwu r4,4(r6)
+ beqlr
+ andi. r0,r6,3 /* word-align the cursor */
+ add r5,r0,r5
+ subf r6,r0,r6
+ clrlwi r7,r6,32-LG_CACHELINE_BYTES /* offset within cacheline */
+ add r8,r7,r5
+ srwi r9,r8,LG_CACHELINE_BYTES
+ addic. r9,r9,-1 /* total number of complete cachelines */
+ ble 2f
+ xori r0,r7,CACHELINE_MASK & ~3
+ srwi. r0,r0,2 /* # words to reach cacheline boundary */
+ beq 3f
+ mtctr r0
+4: stwu r4,4(r6)
+ bdnz 4b
+3: mtctr r9 /* zero whole cachelines with dcbz */
+ li r7,4
+10: dcbz r7,r6
+ addi r6,r6,CACHELINE_BYTES
+ bdnz 10b
+ clrlwi r5,r8,32-LG_CACHELINE_BYTES /* bytes left after last line */
+ addi r5,r5,4
+2: srwi r0,r5,2 /* remaining whole words */
+ mtctr r0
+ bdz 6f
+1: stwu r4,4(r6)
+ bdnz 1b
+6: andi. r5,r5,3 /* remaining bytes */
+7: cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+ addi r6,r6,3
+8: stbu r4,1(r6)
+ bdnz 8b
+ blr
+
+/* memset(dst r3, c r4, n r5): replicate c into all 4 bytes of r4,
+ * then store words after aligning, bytes for the tail. */
+_GLOBAL(memset)
+ rlwimi r4,r4,8,16,23 /* replicate byte to halfword */
+ rlwimi r4,r4,16,0,15 /* replicate halfword to word */
+ addi r6,r3,-4
+ cmplwi 0,r5,4
+ blt 7f /* < 4 bytes: byte loop only */
+ stwu r4,4(r6)
+ beqlr
+ andi. r0,r6,3 /* word-align the cursor */
+ add r5,r0,r5
+ subf r6,r0,r6
+ srwi r0,r5,2 /* whole words */
+ mtctr r0
+ bdz 6f
+1: stwu r4,4(r6)
+ bdnz 1b
+6: andi. r5,r5,3 /* byte tail */
+7: cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+ addi r6,r6,3
+8: stbu r4,1(r6)
+ bdnz 8b
+ blr
+
+/* bcopy(src, dst, n): legacy argument order -- swap src/dst and
+ * tail-call memcpy. */
+_GLOBAL(bcopy)
+ mr r6,r3
+ mr r3,r4
+ mr r4,r6
+ b .memcpy
+
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic. This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
+/* Copy r5 bytes from r4 to r3 using dcbz on whole destination
+ * cachelines (dest must be cacheable).  Overlapping regions are
+ * punted to plain memcpy. */
+_GLOBAL(cacheable_memcpy)
+ add r7,r3,r5 /* test if the src & dst overlap */
+ add r8,r4,r5
+ cmplw 0,r4,r7
+ cmplw 1,r3,r8
+ crand 0,0,4 /* cr0.lt &= cr1.lt */
+ blt .memcpy /* if regions overlap */
+
+ addi r4,r4,-4
+ addi r6,r3,-4
+ neg r0,r3
+ andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
+ beq 58f
+
+ cmplw 0,r5,r0 /* is this more than total to do? */
+ blt 63f /* if not much to do */
+ andi. r8,r0,3 /* get it word-aligned first */
+ subf r5,r0,r5
+ mtctr r8
+ beq+ 61f
+70: lbz r9,4(r4) /* do some bytes */
+ stb r9,4(r6)
+ addi r4,r4,1
+ addi r6,r6,1
+ bdnz 70b
+61: srwi. r0,r0,2
+ mtctr r0
+ beq 58f
+72: lwzu r9,4(r4) /* do some words */
+ stwu r9,4(r6)
+ bdnz 72b
+
+58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+ clrlwi r5,r5,32-LG_CACHELINE_BYTES
+ li r11,4
+ mtctr r0
+ beq 63f
+53:
+ dcbz r11,r6 /* zero the line first: avoids reading it in */
+ COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 32
+ COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 64
+ COPY_16_BYTES
+ COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 128
+ COPY_16_BYTES
+ COPY_16_BYTES
+ COPY_16_BYTES
+ COPY_16_BYTES
+#endif
+#endif
+#endif
+ bdnz 53b
+
+63: srwi. r0,r5,2 /* word tail */
+ mtctr r0
+ beq 64f
+30: lwzu r0,4(r4)
+ stwu r0,4(r6)
+ bdnz 30b
+
+64: andi. r0,r5,3 /* byte tail */
+ mtctr r0
+ beq+ 65f
+40: lbz r0,4(r4)
+ stb r0,4(r6)
+ addi r4,r4,1
+ addi r6,r6,1
+ bdnz 40b
+65: blr
+
+/* memmove: copy backwards when dst > src (possible overlap),
+ * otherwise fall through into memcpy. */
+_GLOBAL(memmove)
+ cmplw 0,r3,r4
+ bgt .backwards_memcpy
+ /* fall through */
+
+/* memcpy(dst r3, src r4, n r5): forward copy, two words per
+ * iteration once the destination is word-aligned. */
+_GLOBAL(memcpy)
+ srwi. r7,r5,3 /* # of 8-byte chunks */
+ addi r6,r3,-4
+ addi r4,r4,-4
+ beq 2f /* if less than 8 bytes to do */
+ andi. r0,r6,3 /* get dest word aligned */
+ mtctr r7
+ bne 5f
+1: lwz r7,4(r4)
+ lwzu r8,8(r4)
+ stw r7,4(r6)
+ stwu r8,8(r6)
+ bdnz 1b
+ andi. r5,r5,7
+2: cmplwi 0,r5,4 /* one more word? */
+ blt 3f
+ lwzu r0,4(r4)
+ addi r5,r5,-4
+ stwu r0,4(r6)
+3: cmpwi 0,r5,0 /* byte tail */
+ beqlr
+ mtctr r5
+ addi r4,r4,3
+ addi r6,r6,3
+4: lbzu r0,1(r4)
+ stbu r0,1(r6)
+ bdnz 4b
+ blr
+5: subfic r0,r0,4 /* bytes needed to align dest */
+ mtctr r0
+6: lbz r7,4(r4)
+ addi r4,r4,1
+ stb r7,4(r6)
+ addi r6,r6,1
+ bdnz 6b
+ subf r5,r0,r5
+ rlwinm. r7,r5,32-3,3,31 /* recompute 8-byte chunk count */
+ beq 2b
+ mtctr r7
+ b 1b
+
+/* Copy r5 bytes from r4 to r3 starting from the high end, for
+ * overlapping moves with dst > src (used by memmove). */
+_GLOBAL(backwards_memcpy)
+ rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
+ add r6,r3,r5 /* point cursors past the ends */
+ add r4,r4,r5
+ beq 2f
+ andi. r0,r6,3 /* get dest word aligned */
+ mtctr r7
+ bne 5f
+1: lwz r7,-4(r4)
+ lwzu r8,-8(r4)
+ stw r7,-4(r6)
+ stwu r8,-8(r6)
+ bdnz 1b
+ andi. r5,r5,7
+2: cmplwi 0,r5,4 /* one more word? */
+ blt 3f
+ lwzu r0,-4(r4)
+ subi r5,r5,4
+ stwu r0,-4(r6)
+3: cmpwi 0,r5,0 /* byte tail */
+ beqlr
+ mtctr r5
+4: lbzu r0,-1(r4)
+ stbu r0,-1(r6)
+ bdnz 4b
+ blr
+5: mtctr r0 /* bytes needed to align dest */
+6: lbzu r7,-1(r4)
+ stbu r7,-1(r6)
+ bdnz 6b
+ subf r5,r0,r5
+ rlwinm. r7,r5,32-3,3,31 /* recompute 8-byte chunk count */
+ beq 2b
+ mtctr r7
+ b 1b
+
+/* memcmp(s1 r3, s2 r4, n r5): byte difference at first mismatch,
+ * 0 if all n bytes equal (or n <= 0). */
+_GLOBAL(memcmp)
+ cmpwi 0,r5,0
+ ble- 2f
+ mtctr r5
+ addi r6,r3,-1
+ addi r4,r4,-1
+1: lbzu r3,1(r6)
+ lbzu r0,1(r4)
+ subf. r3,r0,r3
+ bdnzt 2,1b /* loop while ctr != 0 && bytes equal */
+ blr
+2: li r3,0
+ blr
+
+/* memchr(s r3, c r4, n r5): address of first byte equal to c within
+ * n bytes, or NULL. */
+_GLOBAL(memchr)
+ cmpwi 0,r5,0
+ ble- 2f
+ mtctr r5
+ addi r3,r3,-1
+1: lbzu r0,1(r3)
+ cmpw 0,r0,r4
+ bdnzf 2,1b /* loop while ctr != 0 && no match */
+ beqlr /* found: r3 already points at the match */
+2: li r3,0
+ blr
+
+/* Fault-tolerant copy between user and kernel space (r3=dst, r4=src,
+ * r5=len).  Returns 0 on success, else the number of bytes NOT
+ * copied.  Uses dcbz + the COPY_16_BYTES_WITHEX cacheline loop with
+ * prefetch; every access is registered in __ex_table, and the fault
+ * handlers at 99: compute the residual count (on a read fault the
+ * remaining destination is zeroed). */
+_GLOBAL(__copy_tofrom_user)
+ addi r4,r4,-4
+ addi r6,r3,-4
+ neg r0,r3
+ andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
+ beq 58f
+
+ cmplw 0,r5,r0 /* is this more than total to do? */
+ blt 63f /* if not much to do */
+ andi. r8,r0,3 /* get it word-aligned first */
+ mtctr r8
+ beq+ 61f
+70: lbz r9,4(r4) /* do some bytes */
+71: stb r9,4(r6)
+ addi r4,r4,1
+ addi r6,r6,1
+ bdnz 70b
+61: subf r5,r0,r5
+ srwi. r0,r0,2
+ mtctr r0
+ beq 58f
+72: lwzu r9,4(r4) /* do some words */
+73: stwu r9,4(r6)
+ bdnz 72b
+
+58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+ clrlwi r5,r5,32-LG_CACHELINE_BYTES
+ li r11,4
+ beq 63f
+
+ /* Here we decide how far ahead to prefetch the source */
+#if MAX_COPY_PREFETCH > 1
+ /* Heuristically, for large transfers we prefetch
+ MAX_COPY_PREFETCH cachelines ahead. For small transfers
+ we prefetch 1 cacheline ahead. */
+ cmpwi r0,MAX_COPY_PREFETCH
+ li r7,1
+ li r3,4
+ ble 111f
+ li r7,MAX_COPY_PREFETCH
+111: mtctr r7
+112: dcbt r3,r4
+ addi r3,r3,CACHELINE_BYTES
+ bdnz 112b
+#else /* MAX_COPY_PREFETCH == 1 */
+ li r3,CACHELINE_BYTES + 4
+ dcbt r11,r4
+#endif /* MAX_COPY_PREFETCH */
+
+ mtctr r0
+53:
+ dcbt r3,r4
+ dcbz r11,r6
+/* had to move these to keep extable in order */
+ .section __ex_table,"a"
+ .align 3
+ .llong 70b,100f
+ .llong 71b,101f
+ .llong 72b,102f
+ .llong 73b,103f
+ .llong 53b,105f
+ .text
+/* the main body of the cacheline loop */
+ COPY_16_BYTES_WITHEX(0)
+#if CACHE_LINE_SIZE >= 32
+ COPY_16_BYTES_WITHEX(1)
+#if CACHE_LINE_SIZE >= 64
+ COPY_16_BYTES_WITHEX(2)
+ COPY_16_BYTES_WITHEX(3)
+#if CACHE_LINE_SIZE >= 128
+ COPY_16_BYTES_WITHEX(4)
+ COPY_16_BYTES_WITHEX(5)
+ COPY_16_BYTES_WITHEX(6)
+ COPY_16_BYTES_WITHEX(7)
+#endif
+#endif
+#endif
+ bdnz 53b
+
+63: srwi. r0,r5,2 /* word tail */
+ mtctr r0
+ beq 64f
+30: lwzu r0,4(r4)
+31: stwu r0,4(r6)
+ bdnz 30b
+
+64: andi. r0,r5,3 /* byte tail */
+ mtctr r0
+ beq+ 65f
+40: lbz r0,4(r4)
+41: stb r0,4(r6)
+ addi r4,r4,1
+ addi r6,r6,1
+ bdnz 40b
+65: li r3,0 /* success: 0 bytes left */
+ blr
+
+/* read fault, initial single-byte copy */
+100: li r4,0
+ b 90f
+/* write fault, initial single-byte copy */
+101: li r4,1
+90: subf r5,r8,r5
+ li r3,0
+ b 99f
+/* read fault, initial word copy */
+102: li r4,0
+ b 91f
+/* write fault, initial word copy */
+103: li r4,1
+91: li r3,2
+ b 99f
+
+/*
+ * this stuff handles faults in the cacheline loop and branches to either
+ * 104f (if in read part) or 105f (if in write part), after updating r5
+ */
+ COPY_16_BYTES_EXCODE(0)
+#if CACHE_LINE_SIZE >= 32
+ COPY_16_BYTES_EXCODE(1)
+#if CACHE_LINE_SIZE >= 64
+ COPY_16_BYTES_EXCODE(2)
+ COPY_16_BYTES_EXCODE(3)
+#if CACHE_LINE_SIZE >= 128
+ COPY_16_BYTES_EXCODE(4)
+ COPY_16_BYTES_EXCODE(5)
+ COPY_16_BYTES_EXCODE(6)
+ COPY_16_BYTES_EXCODE(7)
+#endif
+#endif
+#endif
+
+/* read fault in cacheline loop */
+104: li r4,0
+ b 92f
+/* fault on dcbz (effectively a write fault) */
+/* or write fault in cacheline loop */
+105: li r4,1
+92: li r3,LG_CACHELINE_BYTES
+ b 99f
+/* read fault in final word loop */
+108: li r4,0
+ b 93f
+/* write fault in final word loop */
+109: li r4,1
+93: andi. r5,r5,3
+ li r3,2
+ b 99f
+/* read fault in final byte loop */
+110: li r4,0
+ b 94f
+/* write fault in final byte loop */
+111: li r4,1
+94: li r5,0
+ li r3,0
+/*
+ * At this stage the number of bytes not copied is
+ * r5 + (ctr << r3), and r4 is 0 for read or 1 for write.
+ */
+99: mfctr r0
+ slw r3,r0,r3
+ add r3,r3,r5
+ cmpwi 0,r4,0
+ bne 120f
+/* for read fault, clear out the destination: r3 bytes starting at 4(r6) */
+ srwi. r0,r3,2
+ li r9,0
+ mtctr r0
+ beq 113f
+112: stwu r9,4(r6)
+ bdnz 112b
+113: andi. r0,r3,3
+ mtctr r0
+ beq 120f
+114: stb r9,4(r6)
+ addi r6,r6,1
+ bdnz 114b
+120: blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 30b,108b
+ .llong 31b,109b
+ .llong 40b,110b
+ .llong 41b,111b
+ .llong 112b,120b
+ .llong 114b,120b
+ .text
+
+/* Zero r4 bytes of user memory at r3.  Returns 0 on success or
+ * -EFAULT if a store faults (via the __ex_table entries below). */
+_GLOBAL(__clear_user)
+ addi r6,r3,-4
+ li r3,0 /* default return: success */
+ li r5,0
+ cmplwi 0,r4,4
+ blt 7f /* < 4 bytes: byte loop only */
+ /* clear a single word */
+11: stwu r5,4(r6)
+ beqlr
+ /* clear word sized chunks */
+ andi. r0,r6,3 /* word-align the cursor */
+ add r4,r0,r4
+ subf r6,r0,r6
+ srwi r0,r4,2
+ mtctr r0
+ bdz 6f
+1: stwu r5,4(r6)
+ bdnz 1b
+6: andi. r4,r4,3
+ /* clear byte sized chunks */
+7: cmpwi 0,r4,0
+ beqlr
+ mtctr r4
+ addi r6,r6,3
+8: stbu r5,1(r6)
+ bdnz 8b
+ blr
+99: li r3,-EFAULT /* store faulted */
+ blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 11b,99b
+ .llong 1b,99b
+ .llong 8b,99b
+ .text
+
+/*
+ * __strncpy_from_user(dst r3, src r4, count r5)
+ * Copy at most r5 bytes from user space into dst, stopping after a
+ * NUL byte has been copied. Returns the number of bytes copied,
+ * not counting a terminating NUL, or -EFAULT if the user-space load
+ * at label 1 faults (via the __ex_table fixup below).
+ */
+_GLOBAL(__strncpy_from_user)
+ addi r6,r3,-1 /* r6/r4 are pre-decremented for lbzu/stbu */
+ addi r4,r4,-1
+ cmpwi 0,r5,0
+ beq 2f /* count == 0: return 0 */
+ mtctr r5
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r6) /* the NUL, if seen, is copied too */
+ bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
+ beq 3f /* ended on NUL: r6 points at it */
+2: addi r6,r6,1 /* count exhausted: point past last byte */
+3: subf r3,r3,r6 /* bytes copied, excluding any NUL */
+ blr
+99: li r3,-EFAULT /* fixup target for a faulting load */
+ blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 1b,99b
+ .text
+
+/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
+/*
+ * __strnlen_user: like strnlen() on a user buffer. At most
+ * min(len, top+1-str) bytes are examined.
+ * Returns: string length including the terminating NUL if one is
+ * found, len + 1 if no NUL was found within len bytes, or 0 if the
+ * user-space load at label 1 faults.
+ */
+_GLOBAL(__strnlen_user)
+ addi r7,r3,-1 /* r7 pre-decremented for lbzu */
+ subf r6,r7,r5 /* top+1 - str */
+ cmplw 0,r4,r6
+ bge 0f
+ mr r6,r4
+0: mtctr r6 /* ctr = min(len, top+1 - str) */
+1: lbzu r0,1(r7) /* get next byte */
+ cmpwi 0,r0,0
+ bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
+ addi r7,r7,1
+ subf r3,r3,r7 /* number of bytes we have looked at */
+ beqlr /* return if we found a 0 byte */
+ cmpw 0,r3,r4 /* did we look at all len bytes? */
+ blt 99f /* if not, must have hit top */
+ addi r3,r4,1 /* return len + 1 to indicate no null found */
+ blr
+99: li r3,0 /* bad address, return 0 */
+ blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 1b,99b
--- /dev/null
+#
+# Makefile for the linux ppc64-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+# Override the arch-wide -mminimal-toc (see arch/ppc64/Makefile);
+# NOTE(review): presumably this directory needs a full TOC -- confirm.
+EXTRA_CFLAGS = -mno-minimal-toc
+
+O_TARGET := mm.o
+
+obj-y := fault.o init.o extable.o imalloc.o
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/*
+ * linux/arch/ppc64/mm/extable.c
+ *
+ * from linux/arch/i386/mm/extable.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+/*
+ * Binary search the inclusive range [first, last] of an exception
+ * table (which must be sorted by ascending insn) for an entry whose
+ * insn equals value. Returns the entry's fixup address, or 0 if no
+ * entry matches.
+ */
+static inline unsigned long
+search_one_table(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+ long diff;
+
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0)
+ return mid->fixup;
+ else if (diff < 0)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+ return 0;
+}
+
+/*
+ * Look up a faulting kernel instruction address in the built-in
+ * exception table (bounded by __start/__stop___ex_table from the
+ * linker script). Returns the fixup address, or 0 if none.
+ * NOTE(review): the ret/if/return-0 shape is a leftover from
+ * arches that also search per-module tables; here it is equivalent
+ * to returning search_one_table() directly.
+ */
+unsigned long
+search_exception_table(unsigned long addr)
+{
+ unsigned long ret;
+
+ ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
+ if (ret) return ret;
+
+ return 0;
+}
--- /dev/null
+/*
+ * arch/ppc64/mm/fault.c
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/i386/mm/fault.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Modified by Cort Dougan and Paul Mackerras.
+ *
+ * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <asm/ppcdebug.h>
+
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+extern void (*debugger)(struct pt_regs *);
+extern void (*debugger_fault_handler)(struct pt_regs *);
+extern int (*debugger_dabr_match)(struct pt_regs *);
+int debugger_kernel_faults = 1;
+#endif
+
+extern void die_if_kernel(char *, struct pt_regs *, long);
+void bad_page_fault(struct pt_regs *, unsigned long);
+void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+
+#ifdef CONFIG_PPCDBG
+extern unsigned long get_srr0(void);
+extern unsigned long get_srr1(void);
+#endif
+
+/*
+ * The error_code parameter is DSISR for a data fault,
+ * SRR1 for an instruction fault.
+ */
+/*
+ * Handle a data or instruction access fault.
+ * @regs:       trap frame; regs->trap distinguishes 0x300/0x380 (data)
+ *              from 0x400 (instruction) faults.
+ * @address:    the faulting effective address.
+ * @error_code: DSISR for a data fault, SRR1 for an instruction fault.
+ *
+ * Tries to resolve the fault via the vma / handle_mm_fault() path;
+ * on failure raises SIGSEGV or SIGBUS for user mode, or hands the
+ * fault to bad_page_fault() (exception-table fixup or panic) for
+ * kernel mode.
+ */
+void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+ siginfo_t info;
+ unsigned long code = SEGV_MAPERR;
+ /* 0x02000000: store-access bit -- NOTE(review): DSISR bit; confirm
+ * it is meaningful on the SRR1 (instruction fault) path too. */
+ unsigned long is_write = error_code & 0x02000000;
+ unsigned long mm_fault_return;
+
+ PPCDBG(PPCDBG_MM, "Entering do_page_fault: addr = 0x%16.16lx, error_code = %lx\n\tregs_trap = %lx, srr0 = %lx, srr1 = %lx\n", address, error_code, regs->trap, get_srr0(), get_srr1());
+ /*
+ * Fortunately the bit assignments in SRR1 for an instruction
+ * fault and DSISR for a data fault are mostly the same for the
+ * bits we are interested in. But there are some bits which
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (regs->trap == 0x400)
+ error_code &= 0x48200000;
+
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+ /* give an attached debugger first crack at the fault */
+ if (debugger_fault_handler && (regs->trap == 0x300 ||
+ regs->trap == 0x380)) {
+ debugger_fault_handler(regs);
+ return;
+ }
+
+ if (error_code & 0x00400000) {
+ /* DABR match */
+ if (debugger_dabr_match(regs))
+ return;
+ }
+#endif /* CONFIG_XMON || CONFIG_KGDB */
+
+ /* no mm or atomic context: cannot take mmap_sem, fix up or die */
+ if (in_interrupt() || mm == NULL) {
+ bad_page_fault(regs, address);
+ return;
+ }
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: vma = 0x%16.16lx\n", vma);
+ if (!vma) {
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: !vma\n");
+ goto bad_area;
+ }
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: vma->vm_start = 0x%16.16lx, vma->vm_flags = 0x%16.16lx\n", vma->vm_start, vma->vm_flags);
+ if (vma->vm_start <= address) {
+ goto good_area;
+ }
+ /* address below the vma: only OK for a growable stack vma */
+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: vma->vm_flags = %lx, %lx\n", vma->vm_flags, VM_GROWSDOWN);
+ goto bad_area;
+ }
+ if (expand_stack(vma, address)) {
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: expand_stack\n");
+ goto bad_area;
+ }
+/* a successful expand_stack() falls through into good_area */
+
+good_area:
+ code = SEGV_ACCERR;
+
+ /* a write */
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ /* a read */
+ } else {
+ /* protection fault */
+ if (error_code & 0x08000000)
+ goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: calling handle_mm_fault\n");
+ mm_fault_return = handle_mm_fault(mm, vma, address, is_write);
+ PPCDBG(PPCDBG_MM, "\tdo_page_fault: handle_mm_fault = 0x%lx\n",
+ mm_fault_return);
+ switch(mm_fault_return) {
+ case 1: /* minor fault */
+ current->min_flt++;
+ break;
+ case 2: /* major fault (required I/O) */
+ current->maj_flt++;
+ break;
+ case 0: /* unrecoverable access */
+ goto do_sigbus;
+ default: /* out of memory */
+ goto out_of_memory;
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+bad_area:
+ up_read(&mm->mmap_sem);
+
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = code; /* SEGV_MAPERR or SEGV_ACCERR */
+ info.si_addr = (void *) address;
+ PPCDBG(PPCDBG_SIGNAL, "Bad addr in user: 0x%lx\n", address);
+#ifdef CONFIG_XMON
+ ifppcdebug(PPCDBG_SIGNALXMON)
+ PPCDBG_ENTER_DEBUGGER_REGS(regs);
+#endif
+
+ force_sig_info(SIGSEGV, &info, current);
+ return;
+ }
+
+ /* kernel-mode bad access: try exception-table fixup, else panic */
+ bad_page_fault(regs, address);
+ return;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", current->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ bad_page_fault(regs, address);
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info (SIGBUS, &info, current);
+ /* a kernel-mode fault still needs fixup/panic handling */
+ if (!user_mode(regs))
+ bad_page_fault(regs, address);
+}
+
+/*
+ * bad_page_fault is called when we have a bad access from the kernel.
+ * It is called from do_page_fault above and from some of the procedures
+ * in traps.c.
+ */
+/*
+ * Handle a bad access taken in kernel mode: if the faulting
+ * instruction has an exception-table entry, resume at its fixup
+ * handler; otherwise dump state and panic.
+ */
+void
+bad_page_fault(struct pt_regs *regs, unsigned long address)
+{
+ unsigned long fixup;
+
+ /* Are we prepared to handle this fault? */
+ if ((fixup = search_exception_table(regs->nip)) != 0) {
+ /* yes: continue execution at the registered fixup */
+ regs->nip = fixup;
+ return;
+ }
+
+ /* kernel has accessed a bad area */
+ show_regs(regs);
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+ if (debugger_kernel_faults)
+ debugger(regs);
+#endif
+ print_backtrace( (unsigned long *)regs->gpr[1] ); /* r1 = stack ptr */
+ panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d",
+ regs->nip,regs->link,address,current->comm,current->pid);
+}
+
--- /dev/null
+/*
+ * (c) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+rwlock_t imlist_lock = RW_LOCK_UNLOCKED; /* protects the imlist chain */
+struct vm_struct * imlist = NULL; /* address-sorted list of imalloc areas */
+
+/*
+ * Allocate an address range of the given size from the imalloc
+ * region (starting at IMALLOC_START) and insert it, sorted by
+ * address, into imlist. Returns the new vm_struct, or NULL if the
+ * region is exhausted or kmalloc fails.
+ * NOTE(review): the IMALLOC_END bound is only checked after stepping
+ * past an existing entry, so the very first allocation into an empty
+ * list is not range-checked -- confirm callers cannot pass a size
+ * larger than the whole region.
+ */
+struct vm_struct *get_im_area(unsigned long size)
+{
+ unsigned long addr;
+ struct vm_struct **p, *tmp, *area;
+
+ area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
+ if (!area)
+ return NULL;
+ addr = IMALLOC_START;
+ write_lock(&imlist_lock);
+ for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
+ if (size + addr < (unsigned long) tmp->addr)
+ break; /* gap before tmp is big enough */
+ addr = tmp->size + (unsigned long) tmp->addr;
+ if (addr > IMALLOC_END-size) {
+ write_unlock(&imlist_lock);
+ kfree(area);
+ return NULL;
+ }
+ }
+ area->flags = 0;
+ area->addr = (void *)addr;
+ area->size = size;
+ area->next = *p;
+ *p = area;
+ write_unlock(&imlist_lock);
+ return area;
+}
+
+/*
+ * Release an area previously returned by get_im_area(): unlink the
+ * entry with the matching start address from imlist and kfree() it.
+ * A NULL pointer is ignored; a non-page-aligned or unknown address
+ * is reported and otherwise ignored. Note this only bookkeeps the
+ * list -- it does not tear down any page-table mappings.
+ */
+void ifree(void * addr)
+{
+ struct vm_struct **p, *tmp;
+
+ if (!addr)
+ return;
+ if ((PAGE_SIZE-1) & (unsigned long) addr) {
+ printk(KERN_ERR "Trying to ifree() bad address (%p)\n", addr);
+ return;
+ }
+ write_lock(&imlist_lock);
+ for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
+ if (tmp->addr == addr) {
+ *p = tmp->next;
+ kfree(tmp);
+ write_unlock(&imlist_lock);
+ return;
+ }
+ }
+ write_unlock(&imlist_lock);
+ printk(KERN_ERR "Trying to ifree() nonexistent area (%p)\n", addr);
+}
+
--- /dev/null
+/*
+ *
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h> /* for initrd_* */
+#endif
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/abs_addr.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/rtas.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/tlb.h>
+#include <asm/Naca.h>
+#ifdef CONFIG_PPC_EEH
+#include <asm/eeh.h>
+#endif
+
+#include <asm/ppcdebug.h>
+
+#define PGTOKB(pages) (((pages) * PAGE_SIZE) >> 10)
+
+#ifdef CONFIG_PPC_ISERIES
+#include <asm/iSeries/iSeries_dma.h>
+#endif
+
+struct mmu_context_queue_t mmu_context_queue; /* free queue of user context ids; filled in mm_init_ppc64() */
+int mem_init_done; /* set at the end of mem_init(); gates ioremap behaviour */
+unsigned long ioremap_bot = IMALLOC_BASE; /* bump allocator for ioremap before mem_init_done */
+
+static int boot_mapsize;
+static unsigned long totalram_pages;
+
+extern pgd_t swapper_pg_dir[];
+extern char __init_begin, __init_end;
+extern char __chrp_begin, __chrp_end;
+extern char __openfirmware_begin, __openfirmware_end;
+extern struct _of_tce_table of_tce_table[];
+extern char _start[], _end[];
+extern char _stext[], etext[];
+extern struct task_struct *current_set[NR_CPUS];
+extern struct Naca *naca;
+
+void mm_init_ppc64(void);
+
+unsigned long *pmac_find_end_of_memory(void);
+extern unsigned long *find_end_of_memory(void);
+
+extern pgd_t ioremap_dir[];
+pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir; /* page directory for the I/O region */
+
+static void map_io_page(unsigned long va, unsigned long pa, int flags);
+extern void die_if_kernel(char *,struct pt_regs *,long);
+
+unsigned long klimit = (unsigned long)_end; /* end of the kernel image */
+
+/* legacy 32-bit hash-MMU variables; unused placeholders on ppc64 --
+ * NOTE(review): confirm nothing outside this file still reads them */
+HPTE *Hash=0;
+unsigned long Hash_size=0;
+unsigned long _SDR1=0;
+unsigned long _ASR=0;
+
+/* max amount of RAM to use */
+unsigned long __max_memory;
+
+/* This is declared as we are using the more or less generic
+ * include/asm-ppc64/tlb.h file -- tgall
+ */
+mmu_gather_t mmu_gathers[NR_CPUS];
+
+/*
+ * If the page-table quicklist cache holds more than `high` pages,
+ * free pages from the pgd/pmd/pte quicklists until at most `low`
+ * remain. Returns the number of pages freed.
+ */
+int do_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+
+ if (pgtable_cache_size > high) {
+ do {
+ /* the *_alloc_one_fast(0) calls pop a cached page */
+ if (pgd_quicklist)
+ free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
+ if (pmd_quicklist)
+ free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
+ if (pte_quicklist)
+ free_page((unsigned long)pte_alloc_one_fast(0, 0)), ++freed;
+ } while (pgtable_cache_size > low);
+ }
+ return freed;
+}
+
+/*
+ * Print a summary of memory state to the console: free areas, swap,
+ * and per-page classification (reserved / swap-cached / free /
+ * shared) from a linear walk of mem_map.
+ */
+void show_mem(void)
+{
+ int i,free = 0,total = 0,reserved = 0;
+ int shared = 0, cached = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (!atomic_read(&mem_map[i].count))
+ free++;
+ else
+ shared += atomic_read(&mem_map[i].count) - 1;
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%d pages in page table cache\n",(int)pgtable_cache_size);
+ show_buffers();
+}
+
+/*
+ * Fill in *val for sysinfo(2) / /proc/meminfo. The highmem fields
+ * are zero: all memory is directly addressable here.
+ */
+void si_meminfo(struct sysinfo *val)
+{
+ val->totalram = totalram_pages;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = atomic_read(&buffermem_pages);
+ val->totalhigh = 0;
+ val->freehigh = 0;
+ val->mem_unit = PAGE_SIZE;
+}
+
+/*
+ * Map a physical address range for MMIO access.
+ * On iSeries the address is returned unchanged (I/O remap is a noop
+ * there -- see iounmap). With CONFIG_PPC_EEH, once mem_init is done,
+ * addresses in the upper regions are treated as EEH tokens or
+ * already-mapped addresses. Otherwise this is __ioremap() with
+ * caching inhibited.
+ */
+void *
+ioremap(unsigned long addr, unsigned long size)
+{
+#ifdef CONFIG_PPC_ISERIES
+ return (void*)addr;
+#else
+#ifdef CONFIG_PPC_EEH
+ if(mem_init_done && (addr >> 60UL)) {
+ if (IS_EEH_TOKEN_DISABLED(addr))
+ return IO_TOKEN_TO_ADDR(addr);
+ return (void*)addr; /* already mapped address or EEH token. */
+ }
+#endif
+ return __ioremap(addr, size, _PAGE_NO_CACHE);
+#endif
+}
+
+extern struct vm_struct * get_im_area( unsigned long size );
+
+/*
+ * Create a kernel mapping of the physical range [addr, addr+size)
+ * with the given page flags. Returns a virtual address preserving
+ * addr's sub-page offset, or NULL if size rounds to 0 or no virtual
+ * space is available.
+ */
+void *
+__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+{
+ unsigned long pa, ea, i;
+
+ /*
+ * Choose an address to map it to.
+ * Once the imalloc system is running, we use it.
+ * Before that, we map using addresses going
+ * up from ioremap_bot. imalloc will use
+ * the addresses from ioremap_bot through
+ * IMALLOC_END (0xE000001fffffffff)
+ *
+ */
+ pa = addr & PAGE_MASK;
+ size = PAGE_ALIGN(addr + size) - pa;
+
+ if (size == 0)
+ return NULL;
+
+ if (mem_init_done) {
+ struct vm_struct *area;
+ area = get_im_area(size);
+ if (area == 0)
+ return NULL;
+ ea = (unsigned long)(area->addr);
+ }
+ else {
+ /* early boot: simple bump allocation, never freed */
+ ea = ioremap_bot;
+ ioremap_bot += size;
+ }
+
+ if ((flags & _PAGE_PRESENT) == 0)
+ flags |= pgprot_val(PAGE_KERNEL);
+ if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
+ flags |= _PAGE_GUARDED; /* guard non-cacheable I/O mappings */
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ map_io_page(ea+i, pa+i, flags);
+ }
+
+ return (void *) (ea + (addr & ~PAGE_MASK));
+}
+
+/*
+ * Undo an ioremap(). Currently a no-op on all platforms: iSeries
+ * never maps anything, and the pSeries teardown is not yet
+ * implemented (mappings and imalloc entries are leaked).
+ */
+void iounmap(void *addr)
+{
+#ifdef CONFIG_PPC_ISERIES
+ /* iSeries I/O Remap is a noop */
+ return;
+#else
+ /* DRENG / PPPBBB todo */
+ return;
+#endif
+}
+
+/*
+ * map_io_page currently only called by __ioremap
+ * map_io_page adds an entry to the ioremap page table
+ * and adds an entry to the HPT, possibly bolting it
+ *
+ * ea = effective (virtual) address, pa = physical address,
+ * flags = linux pte flags for the mapping.
+ */
+static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long vsid;
+
+ if (mem_init_done) {
+ /* normal path: record the mapping in the ioremap_mm
+ * page tables; the HPT entry is created on demand */
+ spin_lock(&ioremap_mm.page_table_lock);
+ pgdp = pgd_offset_i(ea);
+ pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+ ptep = pte_alloc(&ioremap_mm, pmdp, ea);
+
+ pa = absolute_to_phys(pa);
+ set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
+ spin_unlock(&ioremap_mm.page_table_lock);
+ } else {
+ /* If the mm subsystem is not fully up, we cannot create a
+ * linux page table entry for this mapping. Simply bolt an
+ * entry in the hardware page table.
+ */
+ vsid = get_kernel_vsid(ea);
+ make_pte(htab_data.htab,
+ (vsid << 28) | (ea & 0xFFFFFFF), // va (NOT the ea)
+ pa,
+ _PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
+ htab_data.htab_hash_mask, 0);
+ }
+}
+
+/*
+ * Flush all stale translations. Since vmalloc is the only caller of
+ * flush_tlb_all, only the vmalloc region actually needs flushing.
+ */
+void
+local_flush_tlb_all(void)
+{
+ /* Implemented to just flush the vmalloc area.
+ * vmalloc is the only user of flush_tlb_all.
+ */
+ local_flush_tlb_range( NULL, VMALLOC_START, VMALLOC_END );
+}
+
+/*
+ * Flush all translations belonging to an address space: walk every
+ * vma of mm and flush its range. With no vmas left, flush the whole
+ * user region to clear stale HPTEs of an exiting address space.
+ */
+void
+local_flush_tlb_mm(struct mm_struct *mm)
+{
+ if ( mm->map_count ) {
+ struct vm_area_struct *mp;
+ for ( mp = mm->mmap; mp != NULL; mp = mp->vm_next )
+ local_flush_tlb_range( mm, mp->vm_start, mp->vm_end );
+ }
+ else /* MIKEC: It is not clear why this is needed */
+ /* paulus: it is needed to clear out stale HPTEs
+ * when an address space (represented by an mm_struct)
+ * is being destroyed. */
+ local_flush_tlb_range( mm, USER_START, USER_END );
+}
+
+
+/*
+ * Callers should hold the mm->page_table_lock
+ *
+ * Flush the translation for a single page: pick the page directory
+ * for the address's region (kernel vmalloc, I/O, or user), clear the
+ * HPTE-tracking bits in the linux pte, and if an HPTE existed, flush
+ * it from the hash table.
+ */
+void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ unsigned long context = 0;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep;
+ pte_t pte;
+
+ switch( REGION_ID(vmaddr) ) {
+ case VMALLOC_REGION_ID:
+ pgd = pgd_offset_k( vmaddr );
+ break;
+ case IO_REGION_ID:
+ pgd = pgd_offset_i( vmaddr );
+ break;
+ case USER_REGION_ID:
+ pgd = pgd_offset( vma->vm_mm, vmaddr );
+ context = vma->vm_mm->context;
+ break;
+ default:
+ panic("local_flush_tlb_page: invalid region 0x%016lx", vmaddr);
+
+ }
+
+
+ if (!pgd_none(*pgd)) {
+ pmd = pmd_offset(pgd, vmaddr);
+ if (!pmd_none(*pmd)) {
+ ptep = pte_offset(pmd, vmaddr);
+ /* Check if HPTE might exist and flush it if so */
+ pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
+ if ( pte_val(pte) & _PAGE_HASHPTE ) {
+ flush_hash_page(context, vmaddr, pte);
+ }
+ }
+ }
+}
+
+/*
+ * Flush translations for the range [start, end), which must be
+ * non-empty and lie within a single region (user, vmalloc or I/O).
+ * Walks the page tables pgd -> pmd -> pte, clearing the HPTE flags
+ * of each present pte and flushing any hashed entry; absent pgd/pmd
+ * levels are skipped wholesale by jumping start to the level's end.
+ */
+void
+local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep;
+ pte_t pte;
+ unsigned long pgd_end, pmd_end;
+ unsigned long context;
+
+ if ( start >= end )
+ panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end );
+
+ if ( REGION_ID(start) != REGION_ID(end) )
+ panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end );
+
+ context = 0;
+
+ switch( REGION_ID(start) ) {
+ case VMALLOC_REGION_ID:
+ pgd = pgd_offset_k( start );
+ break;
+ case IO_REGION_ID:
+ pgd = pgd_offset_i( start );
+ break;
+ case USER_REGION_ID:
+ pgd = pgd_offset( mm, start );
+ context = mm->context;
+ break;
+ default:
+ panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
+
+ }
+
+ do {
+ pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
+ if ( pgd_end > end )
+ pgd_end = end;
+ if ( !pgd_none( *pgd ) ) {
+ pmd = pmd_offset( pgd, start );
+ do {
+ pmd_end = ( start + PMD_SIZE ) & PMD_MASK;
+ if ( pmd_end > end )
+ pmd_end = end;
+ if ( !pmd_none( *pmd ) ) {
+ ptep = pte_offset( pmd, start );
+ do {
+ if ( pte_val(*ptep) & _PAGE_HASHPTE ) {
+ /* clear HPTE flags, then flush if one was hashed */
+ pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
+ if ( pte_val(pte) & _PAGE_HASHPTE )
+ flush_hash_page( context, start, pte );
+ }
+ start += PAGE_SIZE;
+ ++ptep;
+ } while ( start < pmd_end );
+ }
+ else
+ start = pmd_end; /* skip absent pmd */
+ ++pmd;
+ } while ( start < pgd_end );
+ }
+ else
+ start = pgd_end; /* skip absent pgd */
+ ++pgd;
+ } while ( start < end );
+}
+
+
+/*
+ * Return the __init text/data section (between __init_begin and
+ * __init_end, see vmlinux.lds) to the page allocator once boot-time
+ * initialisation is complete.
+ */
+void __init free_initmem(void)
+{
+ unsigned long a;
+ unsigned long num_freed_pages = 0;
+/* Unreserve and free every page in [START, END), counting into CNT */
+#define FREESEC(START,END,CNT) do { \
+ a = (unsigned long)(&START); \
+ for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
+ clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
+ set_page_count(mem_map+MAP_NR(a), 1); \
+ free_page(a); \
+ CNT++; \
+ } \
+} while (0)
+
+ FREESEC(__init_begin,__init_end,num_freed_pages);
+
+ printk ("Freeing unused kernel memory: %ldk init\n",
+ PGTOKB(num_freed_pages));
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Free the pages holding the initial ramdisk image, [start, end),
+ * back to the page allocator once the initrd is no longer needed.
+ */
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ unsigned long xstart = start;
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(mem_map + MAP_NR(start));
+ set_page_count(mem_map+MAP_NR(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+ printk ("Freeing initrd memory: %ldk freed\n", (end - xstart) >> 10);
+}
+#endif
+
+
+
+/*
+ * Do very early mm setup.
+ *
+ * Fills the MMU context free-queue with all user context ids and
+ * write-protects a guard page inside each CPU's Paca so that a
+ * stack overflow into it faults instead of silently corrupting it.
+ */
+void __init mm_init_ppc64(void) {
+ struct Paca *paca;
+ unsigned long guard_page, index;
+
+ ppc_md.progress("MM:init", 0);
+
+ /* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
+ * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
+ * are stored on a stack/queue for easy allocation and deallocation.
+ */
+ mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
+ mmu_context_queue.head = 0;
+ mmu_context_queue.tail = NUM_USER_CONTEXT-1;
+ mmu_context_queue.size = NUM_USER_CONTEXT;
+ for(index=0; index < NUM_USER_CONTEXT ;index++) {
+ mmu_context_queue.elements[index] = index+FIRST_USER_CONTEXT;
+ }
+
+ /* Setup guard pages for the Paca's */
+ for (index = 0; index < NR_CPUS; index++) {
+ paca = &xPaca[index];
+ guard_page = ((unsigned long)paca) + 0x1000;
+ /* PP_RXRX: make the page read-only so stores fault --
+ * NOTE(review): confirm against the HPTE PP encoding */
+ ppc_md.hpte_updateboltedpp(PP_RXRX, guard_page);
+ }
+
+ ppc_md.progress("MM:exit", 0x211);
+}
+
+
+
+/*
+ * Initialize the bootmem system and give it all the memory we
+ * have available.
+ *
+ * Allocates the bootmem bitmap from the LMB allocator, registers all
+ * physical memory as free, then re-reserves every range already in
+ * use (as recorded in lmb.reserved).
+ */
+void __init do_init_bootmem(void)
+{
+ unsigned long i;
+ unsigned long start, bootmap_pages;
+ unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+
+ PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: start\n");
+ /*
+ * Find an area to use for the bootmem bitmap. Calculate the size of
+ * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
+ * Add 1 additional page in case the address isn't page-aligned.
+ */
+ bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+ start = (unsigned long)__a2p(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));
+ if( start == 0 ) {
+ /* cannot continue without a bitmap: drop into the debugger */
+ udbg_printf("do_init_bootmem: failed to allocate a bitmap.\n");
+ udbg_printf("\tbootmap_pages = 0x%lx.\n", bootmap_pages);
+ PPCDBG_ENTER_DEBUGGER();
+ }
+
+ PPCDBG(PPCDBG_MMINIT, "\tstart = 0x%lx\n", start);
+ PPCDBG(PPCDBG_MMINIT, "\tbootmap_pages = 0x%lx\n", bootmap_pages);
+ PPCDBG(PPCDBG_MMINIT, "\tphysicalMemorySize = 0x%lx\n", naca->physicalMemorySize);
+
+ boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+ PPCDBG(PPCDBG_MMINIT, "\tboot_mapsize = 0x%lx\n", boot_mapsize);
+
+ /* add all physical memory to the bootmem map */
+ for (i=0; i < lmb.memory.cnt ;i++) {
+ unsigned long physbase = lmb.memory.region[i].physbase;
+ unsigned long size = lmb.memory.region[i].size;
+ free_bootmem(physbase, size);
+ }
+ /* reserve the sections we're already using */
+ for (i=0; i < lmb.reserved.cnt ;i++) {
+ unsigned long physbase = lmb.reserved.region[i].physbase;
+ unsigned long size = lmb.reserved.region[i].size;
+#if 0 /* PPPBBB */
+ if ( (physbase == 0) && (size < (16<<20)) ) {
+ size = 16 << 20;
+ }
+#endif
+ reserve_bootmem(physbase, size);
+ }
+
+ PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: end\n");
+}
+
+/*
+ * paging_init() sets up the page tables - in fact we've already done this.
+ * All that remains is to hand every page to the DMA zone, since all
+ * of memory is DMA-able here; the other zones stay empty.
+ */
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES], i;
+
+ /*
+ * All pages are DMA-able so we put them all in the DMA zone.
+ */
+ zones_size[0] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ for (i = 1; i < MAX_NR_ZONES; i++)
+ zones_size[i] = 0;
+ free_area_init(zones_size);
+}
+
+extern unsigned long prof_shift;
+extern unsigned long prof_len;
+extern unsigned int * prof_buffer;
+extern unsigned long dprof_shift;
+extern unsigned long dprof_len;
+extern unsigned int * dprof_buffer;
+
+/*
+ * Final memory initialisation: release all bootmem pages to the page
+ * allocator, re-reserve the System.map pages, account kernel
+ * code/data/init pages for the boot banner, then mark mem_init_done
+ * and protect the hardware interrupt stacks.
+ */
+void __init mem_init(void)
+{
+ extern char *sysmap;
+ extern unsigned long sysmap_size;
+ unsigned long addr;
+ int codepages = 0;
+ int datapages = 0;
+ int initpages = 0;
+ unsigned long va_rtas_base = (unsigned long)__va(rtas.base);
+
+ max_mapnr = max_low_pfn;
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ num_physpages = max_mapnr; /* RAM is assumed contiguous */
+ max_pfn = max_low_pfn;
+
+ totalram_pages += free_all_bootmem();
+
+ ifppcdebug(PPCDBG_MMINIT) {
+ udbg_printf("mem_init: totalram_pages = 0x%lx\n", totalram_pages);
+ udbg_printf("mem_init: va_rtas_base = 0x%lx\n", va_rtas_base);
+ udbg_printf("mem_init: va_rtas_end = 0x%lx\n", PAGE_ALIGN(va_rtas_base+rtas.size));
+ udbg_printf("mem_init: pinned start = 0x%lx\n", __va(0));
+ udbg_printf("mem_init: pinned end = 0x%lx\n", PAGE_ALIGN(klimit));
+ }
+
+ /* keep the System.map image resident */
+ if ( sysmap_size )
+ for (addr = (unsigned long)sysmap;
+ addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
+ addr += PAGE_SIZE)
+ SetPageReserved(mem_map + MAP_NR(addr));
+
+ /* classify each reserved page as code, init or data for the banner */
+ for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
+ addr += PAGE_SIZE) {
+ if (!PageReserved(mem_map + MAP_NR(addr)))
+ continue;
+ if (addr < (ulong) etext)
+ codepages++;
+
+ else if (addr >= (unsigned long)&__init_begin
+ && addr < (unsigned long)&__init_end)
+ initpages++;
+ else if (addr < klimit)
+ datapages++;
+ }
+
+ printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
+ (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
+ codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
+ initpages<< (PAGE_SHIFT-10),
+ PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));
+ mem_init_done = 1;
+
+ /* set the last page of each hardware interrupt stack to be protected */
+ initialize_paca_hardware_interrupt_stack();
+
+#ifdef CONFIG_PPC_ISERIES
+ create_virtual_bus_tce_table();
+ /* HACK HACK This allows the iSeries profiling to use /proc/profile */
+ prof_shift = dprof_shift;
+ prof_len = dprof_len;
+ prof_buffer = dprof_buffer;
+#endif
+}
+
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean. We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
+void flush_dcache_page(struct page *page)
+{
+ /* PG_arch_1 tracks "i-cache is clean for this page" */
+ clear_bit(PG_arch_1, &page->flags);
+}
+
+/*
+ * Make the instruction cache coherent for a page about to be mapped
+ * into a user process; skipped if the page is already marked clean
+ * (PG_arch_1), reserved, or not backed by a mapping.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (page->mapping && !PageReserved(page)
+ && !test_bit(PG_arch_1, &page->flags)) {
+ __flush_dcache_icache(page_address(page));
+ set_bit(PG_arch_1, &page->flags); /* now i-cache clean */
+ }
+}
+
+/*
+ * Zero a page destined for user space. vaddr is unused here; cache
+ * coherency is handled separately via flush_dcache/icache_page.
+ */
+void clear_user_page(void *page, unsigned long vaddr)
+{
+ clear_page(page);
+}
+
+/*
+ * Copy a page for user space (e.g. COW) and make the destination
+ * coherent in the instruction cache, since it may hold code.
+ */
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr)
+{
+ copy_page(vto, vfrom);
+ __flush_dcache_icache(vto);
+}
+
+/*
+ * Flush the i-cache for `len` bytes written into a user page (e.g.
+ * by ptrace when setting breakpoints), using the page's kernel
+ * mapping rather than the user virtual address.
+ */
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ unsigned long maddr;
+
+ maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
+ flush_icache_range(maddr, maddr + len);
+}
--- /dev/null
+/* Linker script for the ppc64 kernel image (vmlinux). */
+OUTPUT_ARCH(powerpc)
+SEARCH_DIR(/lib); SEARCH_DIR(/usr/lib); SEARCH_DIR(/usr/local/lib); SEARCH_DIR(/usr/local/powerpc-any-elf/lib);
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .rel.text : { *(.rel.text) }
+ .rela.text : { *(.rela.text) }
+ .rel.data : { *(.rel.data) }
+ .rela.data : { *(.rela.data) }
+ .rel.rodata : { *(.rel.rodata) }
+ .rela.rodata : { *(.rela.rodata) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+/* .init : { *(.init) } =0*/
+ .plt : { *(.plt) }
+ .text :
+ {
+ *(.text)
+ *(.fixup)
+ *(.got1)
+ }
+ . = ALIGN(4096);
+ _etext = .;
+ PROVIDE (etext = .);
+ .rodata :
+ {
+ *(.rodata)
+ *(.rodata1)
+ }
+ .fini : { *(.fini) } =0
+ .ctors : { *(.ctors) }
+ .dtors : { *(.dtors) }
+ /* Read-write section, merged into data segment: */
+ /* round the data segment up to a 4K page boundary */
+ . = (. + 0x0FFF) & 0xFFFFFFFFFFFFF000;
+ .data :
+ {
+ *(.data)
+ *(.data1)
+ *(.sdata)
+ *(.sdata2)
+ *(.got.plt) *(.got)
+ *(.dynamic)
+ CONSTRUCTORS
+ }
+ . = ALIGN(4096);
+ _edata = .;
+ PROVIDE (edata = .);
+
+ /* NOTE(review): *(.fixup) was already collected into .text above,
+ * so this output section should end up empty -- confirm intent. */
+ .fixup : { *(.fixup) }
+ /* exception table: pairs of 64-bit (insn, fixup) addresses emitted
+ * with ".align 3; .llong" from the .S files; bounded by the
+ * __start/__stop symbols used by search_exception_table(). */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { *(__ksymtab) }
+ __stop___ksymtab = .;
+ __start___kallsyms = .; /* All kernel symbols */
+ __kallsyms : { *(__kallsyms) }
+ __stop___kallsyms = .;
+
+ . = ALIGN(16384); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.page_aligned) }
+
+ . = ALIGN(128);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ /* boot-time-only sections: freed by free_initmem() */
+ . = ALIGN(4096);
+ __init_begin = .;
+ .text.init : { *(.text.init) }
+ .data.init : {
+ *(.data.init);
+ __vtop_table_begin = .;
+ *(.vtop_fixup);
+ __vtop_table_end = .;
+ __ptov_table_begin = .;
+ *(.ptov_fixup);
+ __ptov_table_end = .;
+ }
+ . = ALIGN(16);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+
+
+ . = ALIGN(4096);
+ __init_end = .;
+
+ __chrp_begin = .;
+ .text.chrp : { *(.text.chrp) }
+ .data.chrp : { *(.data.chrp) }
+ . = ALIGN(4096);
+ __chrp_end = .;
+
+ . = ALIGN(4096);
+ __openfirmware_begin = .;
+ .text.openfirmware : { *(.text.openfirmware) }
+ .data.openfirmware : { *(.data.openfirmware) }
+ . = ALIGN(4096);
+ __openfirmware_end = .;
+
+ __toc_start = .;
+ .toc :
+ {
+ *(.toc)
+ }
+ . = ALIGN(4096);
+ __toc_end = .;
+
+ __bss_start = .;
+ .bss :
+ {
+ *(.sbss) *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ }
+
+ . = ALIGN(4096);
+ _end = . ;
+ PROVIDE (end = .);
+}
--- /dev/null
+# Makefile for xmon
+
+# Override the arch-wide -mminimal-toc (see arch/ppc64/Makefile)
+EXTRA_CFLAGS = -mno-minimal-toc
+
+O_TARGET = x.o
+
+obj-y := start.o xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/* ANSI and traditional C compatibility macros
+ Copyright 1991, 1992 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+/* ANSI and traditional C compatibility macros
+
+ ANSI C is assumed if __STDC__ is #defined.
+
+ Macro ANSI C definition Traditional C definition
+ ----- ---- - ---------- ----------- - ----------
+ PTR `void *' `char *'
+ LONG_DOUBLE `long double' `double'
+ VOLATILE `volatile' `'
+ SIGNED `signed' `'
+ PTRCONST `void *const' `char *'
+ ANSI_PROTOTYPES 1 not defined
+
+ CONST is also defined, but is obsolete. Just use const.
+
+ DEFUN (name, arglist, args)
+
+ Defines function NAME.
+
+ ARGLIST lists the arguments, separated by commas and enclosed in
+ parentheses. ARGLIST becomes the argument list in traditional C.
+
+ ARGS list the arguments with their types. It becomes a prototype in
+ ANSI C, and the type declarations in traditional C. Arguments should
+ be separated with `AND'. For functions with a variable number of
+ arguments, the last thing listed should be `DOTS'.
+
+ DEFUN_VOID (name)
+
+ Defines a function NAME, which takes no arguments.
+
+ obsolete -- EXFUN (name, (prototype)) -- obsolete.
+
+ Replaced by PARAMS. Do not use; will disappear someday soon.
+ Was used in external function declarations.
+ In ANSI C it is `NAME PROTOTYPE' (so PROTOTYPE should be enclosed in
+ parentheses). In traditional C it is `NAME()'.
+ For a function that takes no arguments, PROTOTYPE should be `(void)'.
+
+ PARAMS ((args))
+
+ We could use the EXFUN macro to handle prototype declarations, but
+ the name is misleading and the result is ugly. So we just define a
+ simple macro to handle the parameter lists, as in:
+
+ static int foo PARAMS ((int, char));
+
+ This produces: `static int foo();' or `static int foo (int, char);'
+
+ EXFUN would have done it like this:
+
+ static int EXFUN (foo, (int, char));
+
+ but the function is not external...and it's hard to visually parse
+ the function name out of the mess. EXFUN should be considered
+ obsolete; new code should be written to use PARAMS.
+
+ For example:
+ extern int printf PARAMS ((CONST char *format DOTS));
+ int DEFUN(fprintf, (stream, format),
+ FILE *stream AND CONST char *format DOTS) { ... }
+ void DEFUN_VOID(abort) { ... }
+*/
+
+#ifndef _ANSIDECL_H
+
+#define _ANSIDECL_H 1
+
+
+/* Every source file includes this file,
+ so they will all get the switch for lint. */
+/* LINTLIBRARY */
+
+
+#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(WIN32)
+/* All known AIX compilers implement these things (but don't always
+ define __STDC__). The RISC/OS MIPS compiler defines these things
+ in SVR4 mode, but does not define __STDC__. */
+
+/* ANSI C versions of the macros (see the table in the header
+ comment above). */
+#define PTR void *
+#define PTRCONST void *CONST
+#define LONG_DOUBLE long double
+
+#define AND ,
+#define NOARGS void
+#define CONST const
+#define VOLATILE volatile
+#define SIGNED signed
+#define DOTS , ...
+
+#define EXFUN(name, proto) name proto
+#define DEFUN(name, arglist, args) name(args)
+#define DEFUN_VOID(name) name(void)
+
+#define PROTO(type, name, arglist) type name arglist
+#define PARAMS(paramlist) paramlist
+#define ANSI_PROTOTYPES 1
+
+#else /* Not ANSI C. */
+
+/* Traditional (K&R) C versions: prototypes and type qualifiers
+ expand to nothing. */
+#define PTR char *
+#define PTRCONST PTR
+#define LONG_DOUBLE double
+
+#define AND ;
+#define NOARGS
+#define CONST
+#ifndef const /* some systems define it in header files for non-ansi mode */
+#define const
+#endif
+#define VOLATILE
+#define SIGNED
+#define DOTS
+
+#define EXFUN(name, proto) name()
+#define DEFUN(name, arglist, args) name arglist args;
+#define DEFUN_VOID(name) name()
+#define PROTO(type, name, arglist) type name ()
+#define PARAMS(paramlist) ()
+
+#endif /* ANSI C. */
+
+#endif /* ansidecl.h */
--- /dev/null
+/* Minimal stdio replacement for xmon: maps the usual stdio names
+ onto xmon's own I/O routines so ppc-dis.c can be compiled
+ unchanged inside the kernel. */
+typedef int FILE; /* opaque stream token, not a real stdio FILE */
+extern FILE *xmon_stdin, *xmon_stdout;
+#define EOF (-1)
+#define stdin xmon_stdin
+#define stdout xmon_stdout
+#define printf xmon_printf
+#define fprintf xmon_fprintf
+#define fputs xmon_fputs
+#define fgets xmon_fgets
+#define putchar xmon_putchar
+#define getchar xmon_getchar
+#define putc xmon_putc
+#define getc xmon_getc
+#define fopen(n, m) NULL
+#define fflush(f) do {} while (0)
+#define fclose(f) do {} while (0)
+/* The macros above rewrite this declaration too, so it actually
+ declares xmon_fgets -- intentional. */
+extern char *fgets(char *, int, void *);
+extern void xmon_printf(const char *, ...);
+extern void xmon_fprintf(void *, const char *, ...);
+extern void xmon_sprintf(char *, const char *, ...);
+
+#define perror(s) printf("%s: no files!\n", (s))
--- /dev/null
+/* ppc-dis.c -- Disassemble PowerPC instructions
+ Copyright 1994 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Cygnus Support
+
+This file is part of GDB, GAS, and the GNU binutils.
+
+GDB, GAS, and the GNU binutils are free software; you can redistribute
+them and/or modify them under the terms of the GNU General Public
+License as published by the Free Software Foundation; either version
+2, or (at your option) any later version.
+
+GDB, GAS, and the GNU binutils are distributed in the hope that they
+will be useful, but WITHOUT ANY WARRANTY; without even the implied
+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this file; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include "nonstdio.h"
+#include "ansidecl.h"
+#include "ppc.h"
+
+static int print_insn_powerpc PARAMS ((FILE *, unsigned long insn,
+ unsigned long memaddr, int dialect));
+
+extern void print_address PARAMS((unsigned long memaddr));
+
+/* Print a big endian PowerPC instruction. For convenience, also
+ disassemble instructions supported by the Motorola PowerPC 601.
+ Returns the number of bytes consumed (always 4). */
+
+int
+print_insn_big_powerpc (FILE *out, unsigned long insn, unsigned long memaddr)
+{
+ return print_insn_powerpc (out, insn, memaddr,
+ PPC_OPCODE_PPC | PPC_OPCODE_601);
+}
+
+/* Print a PowerPC or POWER instruction. */
+
+/* Disassemble INSN (fetched from address MEMADDR) to OUT, accepting
+ only opcodes whose flags intersect DIALECT. Returns the number of
+ bytes consumed (always 4); an unrecognized word is printed as a
+ ".long" directive. */
+static int
+print_insn_powerpc (FILE *out, unsigned long insn, unsigned long memaddr,
+ int dialect)
+{
+ const struct powerpc_opcode *opcode;
+ const struct powerpc_opcode *opcode_end;
+ unsigned long op;
+
+ /* Get the major opcode of the instruction. */
+ op = PPC_OP (insn);
+
+ /* Find the first match in the opcode table. We could speed this up
+ a bit by doing a binary search on the major opcode. */
+ opcode_end = powerpc_opcodes + powerpc_num_opcodes;
+ for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++)
+ {
+ unsigned long table_op;
+ const unsigned char *opindex;
+ const struct powerpc_operand *operand;
+ int invalid;
+ int need_comma;
+ int need_paren;
+
+ /* The table is sorted by major opcode, so we can stop early. */
+ table_op = PPC_OP (opcode->opcode);
+ if (op < table_op)
+ break;
+ if (op > table_op)
+ continue;
+
+ if ((insn & opcode->mask) != opcode->opcode
+ || (opcode->flags & dialect) == 0)
+ continue;
+
+ /* Make two passes over the operands. First see if any of them
+ have extraction functions, and, if they do, make sure the
+ instruction is valid. */
+ invalid = 0;
+ for (opindex = opcode->operands; *opindex != 0; opindex++)
+ {
+ operand = powerpc_operands + *opindex;
+ if (operand->extract)
+ (*operand->extract) (insn, &invalid);
+ }
+ if (invalid)
+ continue;
+
+ /* The instruction is valid. */
+ fprintf(out, "%s", opcode->name);
+ if (opcode->operands[0] != 0)
+ fprintf(out, "\t");
+
+ /* Now extract and print the operands. */
+ need_comma = 0;
+ need_paren = 0;
+ for (opindex = opcode->operands; *opindex != 0; opindex++)
+ {
+ long value;
+
+ operand = powerpc_operands + *opindex;
+
+ /* Operands that are marked FAKE are simply ignored. We
+ already made sure that the extract function considered
+ the instruction to be valid. */
+ if ((operand->flags & PPC_OPERAND_FAKE) != 0)
+ continue;
+
+ /* Extract the value from the instruction. */
+ if (operand->extract)
+ value = (*operand->extract) (insn, (int *) 0);
+ else
+ {
+ value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
+ /* Sign-extend a signed field by hand. */
+ if ((operand->flags & PPC_OPERAND_SIGNED) != 0
+ && (value & (1 << (operand->bits - 1))) != 0)
+ value -= 1 << operand->bits;
+ }
+
+ /* If the operand is optional, and the value is zero, don't
+ print anything. */
+ if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0
+ && (operand->flags & PPC_OPERAND_NEXT) == 0
+ && value == 0)
+ continue;
+
+ if (need_comma)
+ {
+ fprintf(out, ",");
+ need_comma = 0;
+ }
+
+ /* Print the operand as directed by the flags. */
+ if ((operand->flags & PPC_OPERAND_GPR) != 0)
+ fprintf(out, "r%ld", value);
+ else if ((operand->flags & PPC_OPERAND_FPR) != 0)
+ fprintf(out, "f%ld", value);
+ else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0)
+ print_address (memaddr + value);
+ else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0)
+ print_address (value & 0xffffffff);
+ else if ((operand->flags & PPC_OPERAND_CR) == 0
+ || (dialect & PPC_OPCODE_PPC) == 0)
+ fprintf(out, "%ld", value);
+ else
+ {
+ if (operand->bits == 3)
+ /* Fix: VALUE is a long, so it must be printed with %ld;
+ "%d" reads the wrong width through varargs. */
+ fprintf(out, "cr%ld", value);
+ else
+ {
+ static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
+ int cr;
+ int cc;
+
+ cr = value >> 2;
+ if (cr != 0)
+ fprintf(out, "4*cr%d", cr);
+ cc = value & 3;
+ if (cc != 0)
+ {
+ if (cr != 0)
+ fprintf(out, "+");
+ fprintf(out, "%s", cbnames[cc]);
+ }
+ }
+ }
+
+ if (need_paren)
+ {
+ fprintf(out, ")");
+ need_paren = 0;
+ }
+
+ /* A PARENS operand is printed as "(reg)" rather than ",reg". */
+ if ((operand->flags & PPC_OPERAND_PARENS) == 0)
+ need_comma = 1;
+ else
+ {
+ fprintf(out, "(");
+ need_paren = 1;
+ }
+ }
+
+ /* We have found and printed an instruction; return. */
+ return 4;
+ }
+
+ /* We could not find a match. */
+ fprintf(out, ".long 0x%lx", insn);
+
+ return 4;
+}
--- /dev/null
+/* ppc-opc.c -- PowerPC opcode list
+ Copyright 1994 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Cygnus Support
+
+This file is part of GDB, GAS, and the GNU binutils.
+
+GDB, GAS, and the GNU binutils are free software; you can redistribute
+them and/or modify them under the terms of the GNU General Public
+License as published by the Free Software Foundation; either version
+2, or (at your option) any later version.
+
+GDB, GAS, and the GNU binutils are distributed in the hope that they
+will be useful, but WITHOUT ANY WARRANTY; without even the implied
+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this file; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include <linux/posix_types.h>
+#include "ansidecl.h"
+#include "ppc.h"
+
+/* This file holds the PowerPC opcode table. The opcode table
+ includes almost all of the extended instruction mnemonics. This
+ permits the disassembler to use them, and simplifies the assembler
+ logic, at the cost of increasing the table size. The table is
+ strictly constant data, so the compiler should be able to put it in
+ the .text section.
+
+ This file also holds the operand table. All knowledge about
+ inserting operands into instructions and vice-versa is kept in this
+ file. */
+\f
+/* Local insertion and extraction functions. */
+
+static unsigned long insert_bat PARAMS ((unsigned long, long, const char **));
+static long extract_bat PARAMS ((unsigned long, int *));
+static unsigned long insert_bba PARAMS ((unsigned long, long, const char **));
+static long extract_bba PARAMS ((unsigned long, int *));
+static unsigned long insert_bd PARAMS ((unsigned long, long, const char **));
+static long extract_bd PARAMS ((unsigned long, int *));
+static unsigned long insert_bdm PARAMS ((unsigned long, long, const char **));
+static long extract_bdm PARAMS ((unsigned long, int *));
+static unsigned long insert_bdp PARAMS ((unsigned long, long, const char **));
+static long extract_bdp PARAMS ((unsigned long, int *));
+static unsigned long insert_bo PARAMS ((unsigned long, long, const char **));
+static long extract_bo PARAMS ((unsigned long, int *));
+static unsigned long insert_boe PARAMS ((unsigned long, long, const char **));
+static long extract_boe PARAMS ((unsigned long, int *));
+static unsigned long insert_ds PARAMS ((unsigned long, long, const char **));
+static long extract_ds PARAMS ((unsigned long, int *));
+static unsigned long insert_li PARAMS ((unsigned long, long, const char **));
+static long extract_li PARAMS ((unsigned long, int *));
+static unsigned long insert_mbe PARAMS ((unsigned long, long, const char **));
+static long extract_mbe PARAMS ((unsigned long, int *));
+static unsigned long insert_mb6 PARAMS ((unsigned long, long, const char **));
+static long extract_mb6 PARAMS ((unsigned long, int *));
+static unsigned long insert_nb PARAMS ((unsigned long, long, const char **));
+static long extract_nb PARAMS ((unsigned long, int *));
+static unsigned long insert_nsi PARAMS ((unsigned long, long, const char **));
+static long extract_nsi PARAMS ((unsigned long, int *));
+static unsigned long insert_ral PARAMS ((unsigned long, long, const char **));
+static unsigned long insert_ram PARAMS ((unsigned long, long, const char **));
+static unsigned long insert_ras PARAMS ((unsigned long, long, const char **));
+static unsigned long insert_rbs PARAMS ((unsigned long, long, const char **));
+static long extract_rbs PARAMS ((unsigned long, int *));
+static unsigned long insert_sh6 PARAMS ((unsigned long, long, const char **));
+static long extract_sh6 PARAMS ((unsigned long, int *));
+static unsigned long insert_spr PARAMS ((unsigned long, long, const char **));
+static long extract_spr PARAMS ((unsigned long, int *));
+static unsigned long insert_tbr PARAMS ((unsigned long, long, const char **));
+static long extract_tbr PARAMS ((unsigned long, int *));
+\f
+/* The operands table.
+
+ The fields are bits, shift, insert, extract, flags (matching
+ struct powerpc_operand in ppc.h). NOTE: each #define below is
+ that operand's index into this array; keep the defines in step
+ with the entry order when editing. */
+
+const struct powerpc_operand powerpc_operands[] =
+{
+ /* The zero index is used to indicate the end of the list of
+ operands. */
+#define UNUSED (0)
+ { 0, 0, 0, 0, 0 },
+
+ /* The BA field in an XL form instruction. */
+#define BA (1)
+#define BA_MASK (0x1f << 16)
+ { 5, 16, 0, 0, PPC_OPERAND_CR },
+
+ /* The BA field in an XL form instruction when it must be the same
+ as the BT field in the same instruction. */
+#define BAT (2)
+ { 5, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE },
+
+ /* The BB field in an XL form instruction. */
+#define BB (3)
+#define BB_MASK (0x1f << 11)
+ { 5, 11, 0, 0, PPC_OPERAND_CR },
+
+ /* The BB field in an XL form instruction when it must be the same
+ as the BA field in the same instruction. */
+#define BBA (4)
+ { 5, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE },
+
+ /* The BD field in a B form instruction. The lower two bits are
+ forced to zero. */
+#define BD (5)
+ { 16, 0, insert_bd, extract_bd, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
+
+ /* The BD field in a B form instruction when absolute addressing is
+ used. */
+#define BDA (6)
+ { 16, 0, insert_bd, extract_bd, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
+
+ /* The BD field in a B form instruction when the - modifier is used.
+ This sets the y bit of the BO field appropriately. */
+#define BDM (7)
+ { 16, 0, insert_bdm, extract_bdm,
+ PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
+
+ /* The BD field in a B form instruction when the - modifier is used
+ and absolute address is used. */
+#define BDMA (8)
+ { 16, 0, insert_bdm, extract_bdm,
+ PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
+
+ /* The BD field in a B form instruction when the + modifier is used.
+ This sets the y bit of the BO field appropriately. */
+#define BDP (9)
+ { 16, 0, insert_bdp, extract_bdp,
+ PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
+
+ /* The BD field in a B form instruction when the + modifier is used
+ and absolute addressing is used. */
+#define BDPA (10)
+ { 16, 0, insert_bdp, extract_bdp,
+ PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
+
+ /* The BF field in an X or XL form instruction. */
+#define BF (11)
+ { 3, 23, 0, 0, PPC_OPERAND_CR },
+
+ /* An optional BF field. This is used for comparison instructions,
+ in which an omitted BF field is taken as zero. */
+#define OBF (12)
+ { 3, 23, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+
+ /* The BFA field in an X or XL form instruction. */
+#define BFA (13)
+ { 3, 18, 0, 0, PPC_OPERAND_CR },
+
+ /* The BI field in a B form or XL form instruction. */
+#define BI (14)
+#define BI_MASK (0x1f << 16)
+ { 5, 16, 0, 0, PPC_OPERAND_CR },
+
+ /* The BO field in a B form instruction. Certain values are
+ illegal. */
+#define BO (15)
+#define BO_MASK (0x1f << 21)
+ { 5, 21, insert_bo, extract_bo, 0 },
+
+ /* The BO field in a B form instruction when the + or - modifier is
+ used. This is like the BO field, but it must be even. */
+#define BOE (16)
+ { 5, 21, insert_boe, extract_boe, 0 },
+
+ /* The BT field in an X or XL form instruction. */
+#define BT (17)
+ { 5, 21, 0, 0, PPC_OPERAND_CR },
+
+ /* The condition register number portion of the BI field in a B form
+ or XL form instruction. This is used for the extended
+ conditional branch mnemonics, which set the lower two bits of the
+ BI field. This field is optional. */
+#define CR (18)
+ { 3, 18, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+
+ /* The D field in a D form instruction. This is a displacement off
+ a register, and implies that the next operand is a register in
+ parentheses. */
+#define D (19)
+ { 16, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
+
+ /* The DS field in a DS form instruction. This is like D, but the
+ lower two bits are forced to zero. */
+#define DS (20)
+ { 16, 0, insert_ds, extract_ds, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
+
+ /* The FL1 field in a POWER SC form instruction. */
+#define FL1 (21)
+ { 4, 12, 0, 0, 0 },
+
+ /* The FL2 field in a POWER SC form instruction. */
+#define FL2 (22)
+ { 3, 2, 0, 0, 0 },
+
+ /* The FLM field in an XFL form instruction. */
+#define FLM (23)
+ { 8, 17, 0, 0, 0 },
+
+ /* The FRA field in an X or A form instruction. */
+#define FRA (24)
+#define FRA_MASK (0x1f << 16)
+ { 5, 16, 0, 0, PPC_OPERAND_FPR },
+
+ /* The FRB field in an X or A form instruction. */
+#define FRB (25)
+#define FRB_MASK (0x1f << 11)
+ { 5, 11, 0, 0, PPC_OPERAND_FPR },
+
+ /* The FRC field in an A form instruction. */
+#define FRC (26)
+#define FRC_MASK (0x1f << 6)
+ { 5, 6, 0, 0, PPC_OPERAND_FPR },
+
+ /* The FRS field in an X form instruction or the FRT field in a D, X
+ or A form instruction. */
+#define FRS (27)
+#define FRT (FRS)
+ { 5, 21, 0, 0, PPC_OPERAND_FPR },
+
+ /* The FXM field in an XFX instruction. */
+#define FXM (28)
+#define FXM_MASK (0xff << 12)
+ { 8, 12, 0, 0, 0 },
+
+ /* The L field in a D or X form instruction. */
+#define L (29)
+ { 1, 21, 0, 0, PPC_OPERAND_OPTIONAL },
+
+ /* The LEV field in a POWER SC form instruction. */
+#define LEV (30)
+ { 7, 5, 0, 0, 0 },
+
+ /* The LI field in an I form instruction. The lower two bits are
+ forced to zero. */
+#define LI (31)
+ { 26, 0, insert_li, extract_li, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
+
+ /* The LI field in an I form instruction when used as an absolute
+ address. */
+#define LIA (32)
+ { 26, 0, insert_li, extract_li, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
+
+ /* The MB field in an M form instruction. */
+#define MB (33)
+#define MB_MASK (0x1f << 6)
+ { 5, 6, 0, 0, 0 },
+
+ /* The ME field in an M form instruction. */
+#define ME (34)
+#define ME_MASK (0x1f << 1)
+ { 5, 1, 0, 0, 0 },
+
+ /* The MB and ME fields in an M form instruction expressed a single
+ operand which is a bitmask indicating which bits to select. This
+ is a two operand form using PPC_OPERAND_NEXT. See the
+ description in opcode/ppc.h for what this means. */
+#define MBE (35)
+ { 5, 6, 0, 0, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
+ { 32, 0, insert_mbe, extract_mbe, 0 },
+
+ /* The MB or ME field in an MD or MDS form instruction. The high
+ bit is wrapped to the low end. */
+#define MB6 (37)
+#define ME6 (MB6)
+#define MB6_MASK (0x3f << 5)
+ { 6, 5, insert_mb6, extract_mb6, 0 },
+
+ /* The NB field in an X form instruction. The value 32 is stored as
+ 0. */
+#define NB (38)
+ { 6, 11, insert_nb, extract_nb, 0 },
+
+ /* The NSI field in a D form instruction. This is the same as the
+ SI field, only negated. */
+#define NSI (39)
+ { 16, 0, insert_nsi, extract_nsi,
+ PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED },
+
+ /* The RA field in an D, DS, X, XO, M, or MDS form instruction. */
+#define RA (40)
+#define RA_MASK (0x1f << 16)
+ { 5, 16, 0, 0, PPC_OPERAND_GPR },
+
+ /* The RA field in a D or X form instruction which is an updating
+ load, which means that the RA field may not be zero and may not
+ equal the RT field. */
+#define RAL (41)
+ { 5, 16, insert_ral, 0, PPC_OPERAND_GPR },
+
+ /* The RA field in an lmw instruction, which has special value
+ restrictions. */
+#define RAM (42)
+ { 5, 16, insert_ram, 0, PPC_OPERAND_GPR },
+
+ /* The RA field in a D or X form instruction which is an updating
+ store or an updating floating point load, which means that the RA
+ field may not be zero. */
+#define RAS (43)
+ { 5, 16, insert_ras, 0, PPC_OPERAND_GPR },
+
+ /* The RB field in an X, XO, M, or MDS form instruction. */
+#define RB (44)
+#define RB_MASK (0x1f << 11)
+ { 5, 11, 0, 0, PPC_OPERAND_GPR },
+
+ /* The RB field in an X form instruction when it must be the same as
+ the RS field in the instruction. This is used for extended
+ mnemonics like mr. */
+#define RBS (45)
+ { 5, 1, insert_rbs, extract_rbs, PPC_OPERAND_FAKE },
+
+ /* The RS field in a D, DS, X, XFX, XS, M, MD or MDS form
+ instruction or the RT field in a D, DS, X, XFX or XO form
+ instruction. */
+#define RS (46)
+#define RT (RS)
+#define RT_MASK (0x1f << 21)
+ { 5, 21, 0, 0, PPC_OPERAND_GPR },
+
+ /* The SH field in an X or M form instruction. */
+#define SH (47)
+#define SH_MASK (0x1f << 11)
+ { 5, 11, 0, 0, 0 },
+
+ /* The SH field in an MD form instruction. This is split. */
+#define SH6 (48)
+#define SH6_MASK ((0x1f << 11) | (1 << 1))
+ { 6, 1, insert_sh6, extract_sh6, 0 },
+
+ /* The SI field in a D form instruction. */
+#define SI (49)
+ { 16, 0, 0, 0, PPC_OPERAND_SIGNED },
+
+ /* The SI field in a D form instruction when we accept a wide range
+ of positive values. */
+#define SISIGNOPT (50)
+ { 16, 0, 0, 0, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
+
+ /* The SPR field in an XFX form instruction. This is flipped--the
+ lower 5 bits are stored in the upper 5 and vice- versa. */
+#define SPR (51)
+#define SPR_MASK (0x3ff << 11)
+ { 10, 11, insert_spr, extract_spr, 0 },
+
+ /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */
+#define SPRBAT (52)
+#define SPRBAT_MASK (0x3 << 17)
+ { 2, 17, 0, 0, 0 },
+
+ /* The SPRG register number in an XFX form m[ft]sprg instruction. */
+#define SPRG (53)
+#define SPRG_MASK (0x3 << 16)
+ { 2, 16, 0, 0, 0 },
+
+ /* The SR field in an X form instruction. */
+#define SR (54)
+ { 4, 16, 0, 0, 0 },
+
+ /* The SV field in a POWER SC form instruction. */
+#define SV (55)
+ { 14, 2, 0, 0, 0 },
+
+ /* The TBR field in an XFX form instruction. This is like the SPR
+ field, but it is optional. */
+#define TBR (56)
+ { 10, 11, insert_tbr, extract_tbr, PPC_OPERAND_OPTIONAL },
+
+ /* The TO field in a D or X form instruction. */
+#define TO (57)
+#define TO_MASK (0x1f << 21)
+ { 5, 21, 0, 0, 0 },
+
+ /* The U field in an X form instruction. */
+#define U (58)
+ { 4, 12, 0, 0, 0 },
+
+ /* The UI field in a D form instruction. */
+#define UI (59)
+ { 16, 0, 0, 0, 0 },
+};
+
+/* The functions used to insert and extract complicated operands. */
+
+/* The BA field in an XL form instruction when it must be the same as
+ the BT field in the same instruction. This operand is marked FAKE.
+ The insertion function just copies the BT field into the BA field,
+ and the extraction function just checks that the fields are the
+ same. */
+
+/* Copy BT (bits 21-25) into BA (bits 16-20); VALUE/ERRMSG unused. */
+/*ARGSUSED*/
+static unsigned long
+insert_bat (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ return insn | (((insn >> 21) & 0x1f) << 16);
+}
+
+/* Flag INSN invalid when BA != BT; the returned value is always 0.
+ A NULL INVALID pointer means the caller skips the validity check. */
+static long
+extract_bat (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ if (invalid != (int *) NULL
+ && ((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+/* The BB field in an XL form instruction when it must be the same as
+ the BA field in the same instruction. This operand is marked FAKE.
+ The insertion function just copies the BA field into the BB field,
+ and the extraction function just checks that the fields are the
+ same. */
+
+/* Copy BA (bits 16-20) into BB (bits 11-15). */
+/*ARGSUSED*/
+static unsigned long
+insert_bba (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ return insn | (((insn >> 16) & 0x1f) << 11);
+}
+
+/* Flag INSN invalid when BB != BA; value is always 0. */
+static long
+extract_bba (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ if (invalid != (int *) NULL
+ && ((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+/* The BD field in a B form instruction. The lower two bits are
+ forced to zero. */
+
+/* Insert a 16-bit branch displacement, dropping the two low bits. */
+/*ARGSUSED*/
+static unsigned long
+insert_bd (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ return insn | (value & 0xfffc);
+}
+
+/* Extract the displacement and sign-extend it from 16 bits. */
+/*ARGSUSED*/
+static long
+extract_bd (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ if ((insn & 0x8000) != 0)
+ return (insn & 0xfffc) - 0x10000;
+ else
+ return insn & 0xfffc;
+}
+
+/* The BD field in a B form instruction when the - modifier is used.
+ This modifier means that the branch is not expected to be taken.
+ We must set the y bit of the BO field to 1 if the offset is
+ negative. When extracting, we require that the y bit be 1 and that
+ the offset be positive, since if the y bit is 0 we just want to
+ print the normal form of the instruction. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_bdm (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ if ((value & 0x8000) != 0)
+ insn |= 1 << 21;
+ return insn | (value & 0xfffc);
+}
+
+static long
+extract_bdm (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ /* Only the y-bit-set, negative-displacement form is shown as "-". */
+ if (invalid != (int *) NULL
+ && ((insn & (1 << 21)) == 0
+ || (insn & (1 << 15)) == 0))
+ *invalid = 1;
+ if ((insn & 0x8000) != 0)
+ return (insn & 0xfffc) - 0x10000;
+ else
+ return insn & 0xfffc;
+}
+
+/* The BD field in a B form instruction when the + modifier is used.
+ This is like BDM, above, except that the branch is expected to be
+ taken. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_bdp (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ if ((value & 0x8000) == 0)
+ insn |= 1 << 21;
+ return insn | (value & 0xfffc);
+}
+
+static long
+extract_bdp (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ /* Only the y-bit-set, positive-displacement form is shown as "+". */
+ if (invalid != (int *) NULL
+ && ((insn & (1 << 21)) == 0
+ || (insn & (1 << 15)) != 0))
+ *invalid = 1;
+ if ((insn & 0x8000) != 0)
+ return (insn & 0xfffc) - 0x10000;
+ else
+ return insn & 0xfffc;
+}
+
+/* Check for legal values of a BO field. */
+
+static int
+valid_bo (long value)
+{
+ /* Certain encodings have bits that are required to be zero. These
+ are (z must be zero, y may be anything):
+ 001zy
+ 011zy
+ 1z00y
+ 1z01y
+ 1z1zz
+ */
+ /* Dispatch on bits 4 and 2 (mask 0x14) of the 5-bit BO value. */
+ switch (value & 0x14)
+ {
+ default:
+ case 0:
+ return 1;
+ case 0x4:
+ return (value & 0x2) == 0;
+ case 0x10:
+ return (value & 0x8) == 0;
+ case 0x14:
+ return value == 0x14;
+ }
+}
+
+/* The BO field in a B form instruction. Warn about attempts to set
+ the field to an illegal value. */
+
+static unsigned long
+insert_bo (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ if (errmsg != (const char **) NULL
+ && ! valid_bo (value))
+ *errmsg = "invalid conditional option";
+ return insn | ((value & 0x1f) << 21);
+}
+
+static long
+extract_bo (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ long value;
+
+ value = (insn >> 21) & 0x1f;
+ if (invalid != (int *) NULL
+ && ! valid_bo (value))
+ *invalid = 1;
+ return value;
+}
+
+/* The BO field in a B form instruction when the + or - modifier is
+ used. This is like the BO field, but it must be even. When
+ extracting it, we force it to be even. */
+
+static unsigned long
+insert_boe (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ if (errmsg != (const char **) NULL)
+ {
+ if (! valid_bo (value))
+ *errmsg = "invalid conditional option";
+ else if ((value & 1) != 0)
+ *errmsg = "attempt to set y bit when using + or - modifier";
+ }
+ return insn | ((value & 0x1f) << 21);
+}
+
+static long
+extract_boe (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ long value;
+
+ value = (insn >> 21) & 0x1f;
+ if (invalid != (int *) NULL
+ && ! valid_bo (value))
+ *invalid = 1;
+ /* Clear the y bit so the canonical (even) form is returned. */
+ return value & 0x1e;
+}
+
+/* The DS field in a DS form instruction. This is like D, but the
+ lower two bits are forced to zero. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_ds (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ return insn | (value & 0xfffc);
+}
+
+/* Sign-extend the 16-bit DS displacement (low two bits read as 0). */
+/*ARGSUSED*/
+static long
+extract_ds (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ if ((insn & 0x8000) != 0)
+ return (insn & 0xfffc) - 0x10000;
+ else
+ return insn & 0xfffc;
+}
+
+/* The LI field in an I form instruction. The lower two bits are
+ forced to zero. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_li (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ return insn | (value & 0x3fffffc);
+}
+
+/* Sign-extend the 26-bit LI branch offset. */
+/*ARGSUSED*/
+static long
+extract_li (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ if ((insn & 0x2000000) != 0)
+ return (insn & 0x3fffffc) - 0x4000000;
+ else
+ return insn & 0x3fffffc;
+}
+
+/* The MB and ME fields in an M form instruction expressed as a single
+ operand which is itself a bitmask. The extraction function always
+ marks it as invalid, since we never want to recognize an
+ instruction which uses a field of this type. */
+
+static unsigned long
+insert_mbe (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ unsigned long uval;
+ int mb, me;
+
+ uval = value;
+
+ if (uval == 0)
+ {
+ if (errmsg != (const char **) NULL)
+ *errmsg = "illegal bitmask";
+ return insn;
+ }
+
+ /* Scan up from the low bit (PPC bit 31) for the end of the run. */
+ me = 31;
+ while ((uval & 1) == 0)
+ {
+ uval >>= 1;
+ --me;
+ }
+
+ mb = me;
+ uval >>= 1;
+ while ((uval & 1) != 0)
+ {
+ uval >>= 1;
+ --mb;
+ }
+
+ /* Anything left over means the set bits were not contiguous. */
+ if (uval != 0)
+ {
+ if (errmsg != (const char **) NULL)
+ *errmsg = "illegal bitmask";
+ }
+
+ return insn | (mb << 6) | (me << 1);
+}
+
+static long
+extract_mbe (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ long ret;
+ int mb, me;
+ int i;
+
+ /* Always marked invalid, so print_insn skips any opcode using this
+ operand form and falls back to separate MB/ME operands. */
+ if (invalid != (int *) NULL)
+ *invalid = 1;
+
+ ret = 0;
+ mb = (insn >> 6) & 0x1f;
+ me = (insn >> 1) & 0x1f;
+ /* NOTE(review): later binutils use "i <= me" so the ME bit itself
+ is included -- looks like an off-by-one, though unreachable in
+ practice since *invalid is always set above. TODO confirm. */
+ for (i = mb; i < me; i++)
+ ret |= 1 << (31 - i);
+ return ret;
+}
+
+/* The MB or ME field in an MD or MDS form instruction. The high bit
+ is wrapped to the low end. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_mb6 (insn, value, errmsg)
+ unsigned long insn;
+ long value;
+ const char **errmsg;
+{
+ return insn | ((value & 0x1f) << 6) | (value & 0x20);
+}
+
+/*ARGSUSED*/
+static long
+extract_mb6 (insn, invalid)
+ unsigned long insn;
+ int *invalid;
+{
+ return ((insn >> 6) & 0x1f) | (insn & 0x20);
+}
+
+/* The NB field in an X form instruction. The value 32 is stored as
+   0. */
+
+static unsigned long
+insert_nb (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* NOTE(review): errmsg is dereferenced without a NULL check here,
+     unlike insert_mbe above -- confirm callers always pass one.  */
+  if (value < 0 || value > 32)
+    *errmsg = "value out of range";
+  /* A byte count of 32 is encoded as 0.  */
+  if (value == 32)
+    value = 0;
+  return insn | ((value & 0x1f) << 11);
+}
+
+/* Extract the NB field from bits 11-15; a stored 0 is returned as 32
+   (see insert_nb).  "invalid" is never set.  */
+/*ARGSUSED*/
+static long
+extract_nb (insn, invalid)
+     unsigned long insn;
+     int *invalid;
+{
+  long ret;
+
+  ret = (insn >> 11) & 0x1f;
+  if (ret == 0)
+    ret = 32;
+  return ret;
+}
+
+/* The NSI field in a D form instruction. This is the same as the SI
+   field, only negated. The extraction function always marks it as
+   invalid, since we never want to recognize an instruction which uses
+   a field of this type. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_nsi (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* Store the 16-bit two's complement of the operand.  */
+  return insn | ((- value) & 0xffff);
+}
+
+/* Inverse of insert_nsi: sign-extend the 16-bit field from bit
+   0x8000 and negate it.  Always flagged invalid so the disassembler
+   never matches on this operand type.  */
+static long
+extract_nsi (insn, invalid)
+     unsigned long insn;
+     int *invalid;
+{
+  if (invalid != (int *) NULL)
+    *invalid = 1;
+  if ((insn & 0x8000) != 0)
+    return - ((insn & 0xffff) - 0x10000);
+  else
+    return - (insn & 0xffff);
+}
+
+/* The RA field in a D or X form instruction which is an updating
+   load, which means that the RA field may not be zero and may not
+   equal the RT field. */
+
+static unsigned long
+insert_ral (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* Bits 21-25 of the partially built word already hold RT; reject
+     RA == 0 and RA == RT, then place RA in bits 16-20.  */
+  if (value == 0
+      || value == ((insn >> 21) & 0x1f))
+    *errmsg = "invalid register operand when updating";
+  return insn | ((value & 0x1f) << 16);
+}
+
+/* The RA field in an lmw instruction, which has special value
+   restrictions. */
+
+static unsigned long
+insert_ram (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* Reject RA >= RT (RT is in bits 21-25): the multiple load fills
+     registers from RT upward, which would clobber the base register
+     RA before the transfer completes.  */
+  if (value >= ((insn >> 21) & 0x1f))
+    *errmsg = "index register in load range";
+  return insn | ((value & 0x1f) << 16);
+}
+
+/* The RA field in a D or X form instruction which is an updating
+   store or an updating floating point load, which means that the RA
+   field may not be zero. */
+
+static unsigned long
+insert_ras (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* RA == 0 means "literal zero", not a register, so it cannot be
+     the update base; place RA in bits 16-20.  */
+  if (value == 0)
+    *errmsg = "invalid register operand when updating";
+  return insn | ((value & 0x1f) << 16);
+}
+
+/* The RB field in an X form instruction when it must be the same as
+   the RS field in the instruction. This is used for extended
+   mnemonics like mr. This operand is marked FAKE. The insertion
+   function copies the RS field (bits 21-25) into the RB field (bits
+   11-15), and the extraction function just checks that the fields
+   are the same. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_rbs (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* "value" is ignored: RB is forced equal to RS.  */
+  return insn | (((insn >> 21) & 0x1f) << 11);
+}
+
+/* Check the FAKE RS == RB constraint: flag the instruction invalid
+   if bits 21-25 and bits 11-15 differ.  The operand value itself is
+   always 0.  */
+static long
+extract_rbs (insn, invalid)
+     unsigned long insn;
+     int *invalid;
+{
+  if (invalid != (int *) NULL
+      && ((insn >> 21) & 0x1f) != ((insn >> 11) & 0x1f))
+    *invalid = 1;
+  return 0;
+}
+
+/* The SH field in an MD form instruction. This is split. */
+
+/*ARGSUSED*/
+static unsigned long
+insert_sh6 (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* Low five bits go in bits 11-15; bit 0x20 moves down to bit 1.  */
+  return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4);
+}
+
+/* Inverse of insert_sh6: reassemble the six-bit shift count from its
+   split encoding.  "invalid" is never set.  */
+/*ARGSUSED*/
+static long
+extract_sh6 (insn, invalid)
+     unsigned long insn;
+     int *invalid;
+{
+  return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20);
+}
+
+/* The SPR field in an XFX form instruction. This is flipped--the
+   lower 5 bits are stored in the upper 5 and vice- versa. */
+
+static unsigned long
+insert_spr (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* Low half of the SPR number to bits 16-20, high half to bits
+     11-15.  */
+  return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6);
+}
+
+/* Inverse of insert_spr: swap the two 5-bit halves of the SPR field
+   back into a plain register number.  */
+static long
+extract_spr (insn, invalid)
+     unsigned long insn;
+     int *invalid;
+{
+  return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
+}
+
+/* The TBR field in an XFX instruction. This is just like SPR, but it
+   is optional. When TBR is omitted, it must be inserted as 268 (the
+   magic number of the TB register). These functions treat 0
+   (indicating an omitted optional operand) as 268. This means that
+   ``mftb 4,0'' is not handled correctly. This does not matter very
+   much, since the architecture manual does not define mftb as
+   accepting any values other than 268 or 269. */
+
+#define TB (268)
+
+static unsigned long
+insert_tbr (insn, value, errmsg)
+     unsigned long insn;
+     long value;
+     const char **errmsg;
+{
+  /* An omitted operand arrives as 0 and defaults to TB; the halves
+     are then swapped exactly as in insert_spr.  */
+  if (value == 0)
+    value = TB;
+  return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6);
+}
+
+/* Inverse of insert_tbr: unswap the register number, reporting the
+   default TB (268) as 0 so the operand prints as omitted.  */
+static long
+extract_tbr (insn, invalid)
+     unsigned long insn;
+     int *invalid;
+{
+  long ret;
+
+  ret = ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
+  if (ret == TB)
+    ret = 0;
+  return ret;
+}
+\f
+/* Macros used to form opcodes. */
+/* The plain macros assemble opcode bits; each *_MASK variant marks
+   the bits the disassembler must match for that entry (see the
+   opcode table comment below).  */
+
+/* The main opcode. */
+#define OP(x) (((x) & 0x3f) << 26)
+#define OP_MASK OP (0x3f)
+
+/* The main opcode combined with a trap code in the TO field of a D
+   form instruction. Used for extended mnemonics for the trap
+   instructions. */
+#define OPTO(x,to) (OP (x) | (((to) & 0x1f) << 21))
+#define OPTO_MASK (OP_MASK | TO_MASK)
+
+/* The main opcode combined with a comparison size bit in the L field
+   of a D form or X form instruction. Used for extended mnemonics for
+   the comparison instructions. */
+#define OPL(x,l) (OP (x) | (((l) & 1) << 21))
+#define OPL_MASK OPL (0x3f,1)
+
+/* An A form instruction. */
+#define A(op, xop, rc) (OP (op) | (((xop) & 0x1f) << 1) | ((rc) & 1))
+#define A_MASK A (0x3f, 0x1f, 1)
+
+/* An A_MASK with the FRB field fixed. */
+#define AFRB_MASK (A_MASK | FRB_MASK)
+
+/* An A_MASK with the FRC field fixed. */
+#define AFRC_MASK (A_MASK | FRC_MASK)
+
+/* An A_MASK with the FRA and FRC fields fixed. */
+#define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK)
+
+/* A B form instruction. */
+#define B(op, aa, lk) (OP (op) | (((aa) & 1) << 1) | ((lk) & 1))
+#define B_MASK B (0x3f, 1, 1)
+
+/* A B form instruction setting the BO field. */
+#define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | (((bo) & 0x1f) << 21))
+#define BBO_MASK BBO (0x3f, 0x1f, 1, 1)
+
+/* A BBO_MASK with the y bit of the BO field removed. This permits
+   matching a conditional branch regardless of the setting of the y
+   bit. */
+/* Y_MASK is the lowest bit of the BO field (bit 21).  */
+#define Y_MASK (1 << 21)
+#define BBOY_MASK (BBO_MASK &~ Y_MASK)
+
+/* A B form instruction setting the BO field and the condition bits of
+   the BI field. */
+#define BBOCB(op, bo, cb, aa, lk) \
+  (BBO ((op), (bo), (aa), (lk)) | (((cb) & 0x3) << 16))
+#define BBOCB_MASK BBOCB (0x3f, 0x1f, 0x3, 1, 1)
+
+/* A BBOCB_MASK with the y bit of the BO field removed. */
+#define BBOYCB_MASK (BBOCB_MASK &~ Y_MASK)
+
+/* A BBOYCB_MASK in which the BI field is fixed. */
+#define BBOYBI_MASK (BBOYCB_MASK | BI_MASK)
+
+/* The main opcode mask with the RA field clear. */
+#define DRA_MASK (OP_MASK | RA_MASK)
+
+/* A DS form instruction. */
+#define DSO(op, xop) (OP (op) | ((xop) & 0x3))
+#define DS_MASK DSO (0x3f, 3)
+
+/* An M form instruction. */
+#define M(op, rc) (OP (op) | ((rc) & 1))
+#define M_MASK M (0x3f, 1)
+
+/* An M form instruction with the ME field specified. */
+#define MME(op, me, rc) (M ((op), (rc)) | (((me) & 0x1f) << 1))
+
+/* An M_MASK with the MB and ME fields fixed. */
+#define MMBME_MASK (M_MASK | MB_MASK | ME_MASK)
+
+/* An M_MASK with the SH and ME fields fixed. */
+#define MSHME_MASK (M_MASK | SH_MASK | ME_MASK)
+
+/* An MD form instruction. */
+#define MD(op, xop, rc) (OP (op) | (((xop) & 0x7) << 2) | ((rc) & 1))
+#define MD_MASK MD (0x3f, 0x7, 1)
+
+/* An MD_MASK with the MB field fixed. */
+#define MDMB_MASK (MD_MASK | MB6_MASK)
+
+/* An MD_MASK with the SH field fixed. */
+#define MDSH_MASK (MD_MASK | SH6_MASK)
+
+/* An MDS form instruction. */
+#define MDS(op, xop, rc) (OP (op) | (((xop) & 0xf) << 1) | ((rc) & 1))
+#define MDS_MASK MDS (0x3f, 0xf, 1)
+
+/* An MDS_MASK with the MB field fixed. */
+#define MDSMB_MASK (MDS_MASK | MB6_MASK)
+
+/* An SC form instruction. */
+#define SC(op, sa, lk) (OP (op) | (((sa) & 1) << 1) | ((lk) & 1))
+#define SC_MASK (OP_MASK | (0x3ff << 16) | (1 << 1) | 1)
+
+/* An X form instruction. */
+#define X(op, xop) (OP (op) | (((xop) & 0x3ff) << 1))
+
+/* An X form instruction with the RC bit specified. */
+#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
+
+/* The mask for an X form instruction. */
+#define X_MASK XRC (0x3f, 0x3ff, 1)
+
+/* An X_MASK with the RA field fixed. */
+#define XRA_MASK (X_MASK | RA_MASK)
+
+/* An X_MASK with the RB field fixed. */
+#define XRB_MASK (X_MASK | RB_MASK)
+
+/* An X_MASK with the RT field fixed. */
+#define XRT_MASK (X_MASK | RT_MASK)
+
+/* An X_MASK with the RA and RB fields fixed. */
+#define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
+
+/* An X_MASK with the RT and RA fields fixed. */
+#define XRTRA_MASK (X_MASK | RT_MASK | RA_MASK)
+
+/* An X form comparison instruction. */
+#define XCMPL(op, xop, l) (X ((op), (xop)) | (((l) & 1) << 21))
+
+/* The mask for an X form comparison instruction. */
+#define XCMP_MASK (X_MASK | (1 << 22))
+
+/* The mask for an X form comparison instruction with the L field
+   fixed. */
+#define XCMPL_MASK (XCMP_MASK | (1 << 21))
+
+/* An X form trap instruction with the TO field specified. */
+#define XTO(op, xop, to) (X ((op), (xop)) | (((to) & 0x1f) << 21))
+#define XTO_MASK (X_MASK | TO_MASK)
+
+/* An XFL form instruction. */
+#define XFL(op, xop, rc) (OP (op) | (((xop) & 0x3ff) << 1) | ((rc) & 1))
+#define XFL_MASK (XFL (0x3f, 0x3ff, 1) | (1 << 25) | (1 << 16))
+
+/* An XL form instruction with the LK field set to 0. */
+#define XL(op, xop) (OP (op) | (((xop) & 0x3ff) << 1))
+
+/* An XL form instruction which uses the LK field. */
+#define XLLK(op, xop, lk) (XL ((op), (xop)) | ((lk) & 1))
+
+/* The mask for an XL form instruction. */
+#define XL_MASK XLLK (0x3f, 0x3ff, 1)
+
+/* An XL form instruction which explicitly sets the BO field. */
+#define XLO(op, bo, xop, lk) \
+  (XLLK ((op), (xop), (lk)) | (((bo) & 0x1f) << 21))
+#define XLO_MASK (XL_MASK | BO_MASK)
+
+/* An XL form instruction which explicitly sets the y bit of the BO
+   field. */
+#define XLYLK(op, xop, y, lk) (XLLK ((op), (xop), (lk)) | (((y) & 1) << 21))
+#define XLYLK_MASK (XL_MASK | Y_MASK)
+
+/* An XL form instruction which sets the BO field and the condition
+   bits of the BI field. */
+#define XLOCB(op, bo, cb, xop, lk) \
+  (XLO ((op), (bo), (xop), (lk)) | (((cb) & 3) << 16))
+#define XLOCB_MASK XLOCB (0x3f, 0x1f, 0x3, 0x3ff, 1)
+
+/* An XL_MASK or XLYLK_MASK or XLOCB_MASK with the BB field fixed. */
+#define XLBB_MASK (XL_MASK | BB_MASK)
+#define XLYBB_MASK (XLYLK_MASK | BB_MASK)
+#define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK)
+
+/* An XL_MASK with the BO and BB fields fixed. */
+#define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK)
+
+/* An XL_MASK with the BO, BI and BB fields fixed. */
+#define XLBOBIBB_MASK (XL_MASK | BO_MASK | BI_MASK | BB_MASK)
+
+/* An XO form instruction. */
+#define XO(op, xop, oe, rc) \
+  (OP (op) | (((xop) & 0x1ff) << 1) | (((oe) & 1) << 10) | ((rc) & 1))
+#define XO_MASK XO (0x3f, 0x1ff, 1, 1)
+
+/* An XO_MASK with the RB field fixed. */
+#define XORB_MASK (XO_MASK | RB_MASK)
+
+/* An XS form instruction. */
+#define XS(op, xop, rc) (OP (op) | (((xop) & 0x1ff) << 2) | ((rc) & 1))
+#define XS_MASK XS (0x3f, 0x1ff, 1)
+
+/* A mask for the FXM version of an XFX form instruction. */
+#define XFXFXM_MASK (X_MASK | (1 << 20) | (1 << 11))
+
+/* An XFX form instruction with the FXM field filled in. */
+#define XFXM(op, xop, fxm) \
+  (X ((op), (xop)) | (((fxm) & 0xff) << 12))
+
+/* An XFX form instruction with the SPR field filled in. */
+/* The SPR number is stored with its two 5-bit halves swapped,
+   matching insert_spr/extract_spr above.  */
+#define XSPR(op, xop, spr) \
+  (X ((op), (xop)) | (((spr) & 0x1f) << 16) | (((spr) & 0x3e0) << 6))
+#define XSPR_MASK (X_MASK | SPR_MASK)
+
+/* An XFX form instruction with the SPR field filled in except for the
+   SPRBAT field. */
+#define XSPRBAT_MASK (XSPR_MASK &~ SPRBAT_MASK)
+
+/* An XFX form instruction with the SPR field filled in except for the
+   SPRG field. */
+#define XSPRG_MASK (XSPR_MASK &~ SPRG_MASK)
+
+/* The BO encodings used in extended conditional branch mnemonics. */
+/* Names ending in P differ from their base encoding only in the low
+   (y) bit of the BO field -- cf. Y_MASK above.  */
+#define BODNZF (0x0)
+#define BODNZFP (0x1)
+#define BODZF (0x2)
+#define BODZFP (0x3)
+#define BOF (0x4)
+#define BOFP (0x5)
+#define BODNZT (0x8)
+#define BODNZTP (0x9)
+#define BODZT (0xa)
+#define BODZTP (0xb)
+#define BOT (0xc)
+#define BOTP (0xd)
+#define BODNZ (0x10)
+#define BODNZP (0x11)
+#define BODZ (0x12)
+#define BODZP (0x13)
+#define BOU (0x14)
+
+/* The BI condition bit encodings used in extended conditional branch
+   mnemonics. */
+#define CBLT (0)
+#define CBGT (1)
+#define CBEQ (2)
+#define CBSO (3)
+
+/* The TO encodings used in extended trap mnemonics. */
+#define TOLGT (0x1)
+#define TOLLT (0x2)
+#define TOEQ (0x4)
+#define TOLGE (0x5)
+#define TOLNL (0x5)
+#define TOLLE (0x6)
+#define TOLNG (0x6)
+#define TOGT (0x8)
+#define TOGE (0xc)
+#define TONL (0xc)
+#define TOLT (0x10)
+#define TOLE (0x14)
+#define TONG (0x14)
+#define TONE (0x18)
+#define TOU (0x1f)
+\f
+/* Smaller names for the flags so each entry in the opcodes table will
+   fit on a single line. */
+/* PPC may already be defined elsewhere (e.g. by the compiler or a
+   header), hence the #undef -- TODO confirm.  */
+#undef PPC
+#define PPC PPC_OPCODE_PPC
+#define POWER PPC_OPCODE_POWER
+#define POWER2 PPC_OPCODE_POWER2
+#define B32 PPC_OPCODE_32
+#define B64 PPC_OPCODE_64
+#define M601 PPC_OPCODE_601
+\f
+/* The opcode table.
+
+ The format of the opcode table is:
+
+ NAME OPCODE MASK FLAGS { OPERANDS }
+
+ NAME is the name of the instruction.
+ OPCODE is the instruction opcode.
+ MASK is the opcode mask; this is used to tell the disassembler
+ which bits in the actual opcode must match OPCODE.
+ FLAGS are flags indicated what processors support the instruction.
+ OPERANDS is the list of operands.
+
+ The disassembler reads the table in order and prints the first
+ instruction which matches, so this table is sorted to put more
+ specific instructions before more general instructions. It is also
+ sorted by major opcode. */
+
+const struct powerpc_opcode powerpc_opcodes[] = {
+{ "tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdgti", OPTO(2,TOGT), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdgei", OPTO(2,TOGE), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdnli", OPTO(2,TONL), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdlti", OPTO(2,TOLT), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdlei", OPTO(2,TOLE), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdngi", OPTO(2,TONG), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdnei", OPTO(2,TONE), OPTO_MASK, PPC|B64, { RA, SI } },
+{ "tdi", OP(2), OP_MASK, PPC|B64, { TO, RA, SI } },
+
+{ "twlgti", OPTO(3,TOLGT), OPTO_MASK, PPC, { RA, SI } },
+{ "tlgti", OPTO(3,TOLGT), OPTO_MASK, POWER, { RA, SI } },
+{ "twllti", OPTO(3,TOLLT), OPTO_MASK, PPC, { RA, SI } },
+{ "tllti", OPTO(3,TOLLT), OPTO_MASK, POWER, { RA, SI } },
+{ "tweqi", OPTO(3,TOEQ), OPTO_MASK, PPC, { RA, SI } },
+{ "teqi", OPTO(3,TOEQ), OPTO_MASK, POWER, { RA, SI } },
+{ "twlgei", OPTO(3,TOLGE), OPTO_MASK, PPC, { RA, SI } },
+{ "tlgei", OPTO(3,TOLGE), OPTO_MASK, POWER, { RA, SI } },
+{ "twlnli", OPTO(3,TOLNL), OPTO_MASK, PPC, { RA, SI } },
+{ "tlnli", OPTO(3,TOLNL), OPTO_MASK, POWER, { RA, SI } },
+{ "twllei", OPTO(3,TOLLE), OPTO_MASK, PPC, { RA, SI } },
+{ "tllei", OPTO(3,TOLLE), OPTO_MASK, POWER, { RA, SI } },
+{ "twlngi", OPTO(3,TOLNG), OPTO_MASK, PPC, { RA, SI } },
+{ "tlngi", OPTO(3,TOLNG), OPTO_MASK, POWER, { RA, SI } },
+{ "twgti", OPTO(3,TOGT), OPTO_MASK, PPC, { RA, SI } },
+{ "tgti", OPTO(3,TOGT), OPTO_MASK, POWER, { RA, SI } },
+{ "twgei", OPTO(3,TOGE), OPTO_MASK, PPC, { RA, SI } },
+{ "tgei", OPTO(3,TOGE), OPTO_MASK, POWER, { RA, SI } },
+{ "twnli", OPTO(3,TONL), OPTO_MASK, PPC, { RA, SI } },
+{ "tnli", OPTO(3,TONL), OPTO_MASK, POWER, { RA, SI } },
+{ "twlti", OPTO(3,TOLT), OPTO_MASK, PPC, { RA, SI } },
+{ "tlti", OPTO(3,TOLT), OPTO_MASK, POWER, { RA, SI } },
+{ "twlei", OPTO(3,TOLE), OPTO_MASK, PPC, { RA, SI } },
+{ "tlei", OPTO(3,TOLE), OPTO_MASK, POWER, { RA, SI } },
+{ "twngi", OPTO(3,TONG), OPTO_MASK, PPC, { RA, SI } },
+{ "tngi", OPTO(3,TONG), OPTO_MASK, POWER, { RA, SI } },
+{ "twnei", OPTO(3,TONE), OPTO_MASK, PPC, { RA, SI } },
+{ "tnei", OPTO(3,TONE), OPTO_MASK, POWER, { RA, SI } },
+{ "twi", OP(3), OP_MASK, PPC, { TO, RA, SI } },
+{ "ti", OP(3), OP_MASK, POWER, { TO, RA, SI } },
+
+{ "mulli", OP(7), OP_MASK, PPC, { RT, RA, SI } },
+{ "muli", OP(7), OP_MASK, POWER, { RT, RA, SI } },
+
+{ "subfic", OP(8), OP_MASK, PPC, { RT, RA, SI } },
+{ "sfi", OP(8), OP_MASK, POWER, { RT, RA, SI } },
+
+{ "dozi", OP(9), OP_MASK, POWER|M601, { RT, RA, SI } },
+
+{ "cmplwi", OPL(10,0), OPL_MASK, PPC, { OBF, RA, UI } },
+{ "cmpldi", OPL(10,1), OPL_MASK, PPC|B64, { OBF, RA, UI } },
+{ "cmpli", OP(10), OP_MASK, PPC, { BF, L, RA, UI } },
+{ "cmpli", OP(10), OP_MASK, POWER, { BF, RA, UI } },
+
+{ "cmpwi", OPL(11,0), OPL_MASK, PPC, { OBF, RA, SI } },
+{ "cmpdi", OPL(11,1), OPL_MASK, PPC|B64, { OBF, RA, SI } },
+{ "cmpi", OP(11), OP_MASK, PPC, { BF, L, RA, SI } },
+{ "cmpi", OP(11), OP_MASK, POWER, { BF, RA, SI } },
+
+{ "addic", OP(12), OP_MASK, PPC, { RT, RA, SI } },
+{ "ai", OP(12), OP_MASK, POWER, { RT, RA, SI } },
+{ "subic", OP(12), OP_MASK, PPC, { RT, RA, NSI } },
+
+{ "addic.", OP(13), OP_MASK, PPC, { RT, RA, SI } },
+{ "ai.", OP(13), OP_MASK, POWER, { RT, RA, SI } },
+{ "subic.", OP(13), OP_MASK, PPC, { RT, RA, NSI } },
+
+{ "li", OP(14), DRA_MASK, PPC, { RT, SI } },
+{ "lil", OP(14), DRA_MASK, POWER, { RT, SI } },
+{ "addi", OP(14), OP_MASK, PPC, { RT, RA, SI } },
+{ "cal", OP(14), OP_MASK, POWER, { RT, D, RA } },
+{ "subi", OP(14), OP_MASK, PPC, { RT, RA, NSI } },
+{ "la", OP(14), OP_MASK, PPC, { RT, D, RA } },
+
+{ "lis", OP(15), DRA_MASK, PPC, { RT, SISIGNOPT } },
+{ "liu", OP(15), DRA_MASK, POWER, { RT, SISIGNOPT } },
+{ "addis", OP(15), OP_MASK, PPC, { RT,RA,SISIGNOPT } },
+{ "cau", OP(15), OP_MASK, POWER, { RT,RA,SISIGNOPT } },
+{ "subis", OP(15), OP_MASK, PPC, { RT, RA, NSI } },
+
+{ "bdnz-", BBO(16,BODNZ,0,0), BBOYBI_MASK, PPC, { BDM } },
+{ "bdnz+", BBO(16,BODNZ,0,0), BBOYBI_MASK, PPC, { BDP } },
+{ "bdnz", BBO(16,BODNZ,0,0), BBOYBI_MASK, PPC, { BD } },
+{ "bdn", BBO(16,BODNZ,0,0), BBOYBI_MASK, POWER, { BD } },
+{ "bdnzl-", BBO(16,BODNZ,0,1), BBOYBI_MASK, PPC, { BDM } },
+{ "bdnzl+", BBO(16,BODNZ,0,1), BBOYBI_MASK, PPC, { BDP } },
+{ "bdnzl", BBO(16,BODNZ,0,1), BBOYBI_MASK, PPC, { BD } },
+{ "bdnl", BBO(16,BODNZ,0,1), BBOYBI_MASK, POWER, { BD } },
+{ "bdnza-", BBO(16,BODNZ,1,0), BBOYBI_MASK, PPC, { BDMA } },
+{ "bdnza+", BBO(16,BODNZ,1,0), BBOYBI_MASK, PPC, { BDPA } },
+{ "bdnza", BBO(16,BODNZ,1,0), BBOYBI_MASK, PPC, { BDA } },
+{ "bdna", BBO(16,BODNZ,1,0), BBOYBI_MASK, POWER, { BDA } },
+{ "bdnzla-", BBO(16,BODNZ,1,1), BBOYBI_MASK, PPC, { BDMA } },
+{ "bdnzla+", BBO(16,BODNZ,1,1), BBOYBI_MASK, PPC, { BDPA } },
+{ "bdnzla", BBO(16,BODNZ,1,1), BBOYBI_MASK, PPC, { BDA } },
+{ "bdnla", BBO(16,BODNZ,1,1), BBOYBI_MASK, POWER, { BDA } },
+{ "bdz-", BBO(16,BODZ,0,0), BBOYBI_MASK, PPC, { BDM } },
+{ "bdz+", BBO(16,BODZ,0,0), BBOYBI_MASK, PPC, { BDP } },
+{ "bdz", BBO(16,BODZ,0,0), BBOYBI_MASK, PPC|POWER, { BD } },
+{ "bdzl-", BBO(16,BODZ,0,1), BBOYBI_MASK, PPC, { BDM } },
+{ "bdzl+", BBO(16,BODZ,0,1), BBOYBI_MASK, PPC, { BDP } },
+{ "bdzl", BBO(16,BODZ,0,1), BBOYBI_MASK, PPC|POWER, { BD } },
+{ "bdza-", BBO(16,BODZ,1,0), BBOYBI_MASK, PPC, { BDMA } },
+{ "bdza+", BBO(16,BODZ,1,0), BBOYBI_MASK, PPC, { BDPA } },
+{ "bdza", BBO(16,BODZ,1,0), BBOYBI_MASK, PPC|POWER, { BDA } },
+{ "bdzla-", BBO(16,BODZ,1,1), BBOYBI_MASK, PPC, { BDMA } },
+{ "bdzla+", BBO(16,BODZ,1,1), BBOYBI_MASK, PPC, { BDPA } },
+{ "bdzla", BBO(16,BODZ,1,1), BBOYBI_MASK, PPC|POWER, { BDA } },
+{ "blt-", BBOCB(16,BOT,CBLT,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "blt+", BBOCB(16,BOT,CBLT,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "blt", BBOCB(16,BOT,CBLT,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bltl-", BBOCB(16,BOT,CBLT,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bltl+", BBOCB(16,BOT,CBLT,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bltl", BBOCB(16,BOT,CBLT,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "blta-", BBOCB(16,BOT,CBLT,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "blta+", BBOCB(16,BOT,CBLT,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "blta", BBOCB(16,BOT,CBLT,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bltla-", BBOCB(16,BOT,CBLT,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bltla+", BBOCB(16,BOT,CBLT,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bltla", BBOCB(16,BOT,CBLT,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bgt-", BBOCB(16,BOT,CBGT,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bgt+", BBOCB(16,BOT,CBGT,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bgt", BBOCB(16,BOT,CBGT,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bgtl", BBOCB(16,BOT,CBGT,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bgta-", BBOCB(16,BOT,CBGT,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bgta+", BBOCB(16,BOT,CBGT,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bgta", BBOCB(16,BOT,CBGT,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bgtla", BBOCB(16,BOT,CBGT,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "beq-", BBOCB(16,BOT,CBEQ,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "beq+", BBOCB(16,BOT,CBEQ,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "beq", BBOCB(16,BOT,CBEQ,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "beql-", BBOCB(16,BOT,CBEQ,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "beql+", BBOCB(16,BOT,CBEQ,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "beql", BBOCB(16,BOT,CBEQ,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "beqa", BBOCB(16,BOT,CBEQ,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "beqla", BBOCB(16,BOT,CBEQ,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bso-", BBOCB(16,BOT,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bso+", BBOCB(16,BOT,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bso", BBOCB(16,BOT,CBSO,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bsol-", BBOCB(16,BOT,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bsol+", BBOCB(16,BOT,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bsol", BBOCB(16,BOT,CBSO,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bsoa", BBOCB(16,BOT,CBSO,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bsola-", BBOCB(16,BOT,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bsola+", BBOCB(16,BOT,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bsola", BBOCB(16,BOT,CBSO,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bun-", BBOCB(16,BOT,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bun+", BBOCB(16,BOT,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bun", BBOCB(16,BOT,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BD } },
+{ "bunl-", BBOCB(16,BOT,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bunl+", BBOCB(16,BOT,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bunl", BBOCB(16,BOT,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BD } },
+{ "buna-", BBOCB(16,BOT,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "buna+", BBOCB(16,BOT,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "buna", BBOCB(16,BOT,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDA } },
+{ "bunla-", BBOCB(16,BOT,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bunla+", BBOCB(16,BOT,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bunla", BBOCB(16,BOT,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDA } },
+{ "bge-", BBOCB(16,BOF,CBLT,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bge+", BBOCB(16,BOF,CBLT,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bge", BBOCB(16,BOF,CBLT,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bgel-", BBOCB(16,BOF,CBLT,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bgel+", BBOCB(16,BOF,CBLT,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bgel", BBOCB(16,BOF,CBLT,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bgea-", BBOCB(16,BOF,CBLT,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bgea+", BBOCB(16,BOF,CBLT,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bgea", BBOCB(16,BOF,CBLT,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bgela-", BBOCB(16,BOF,CBLT,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bgela+", BBOCB(16,BOF,CBLT,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bgela", BBOCB(16,BOF,CBLT,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bnl-", BBOCB(16,BOF,CBLT,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bnl+", BBOCB(16,BOF,CBLT,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bnl", BBOCB(16,BOF,CBLT,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnll-", BBOCB(16,BOF,CBLT,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bnll+", BBOCB(16,BOF,CBLT,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bnll", BBOCB(16,BOF,CBLT,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnla-", BBOCB(16,BOF,CBLT,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnla+", BBOCB(16,BOF,CBLT,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnla", BBOCB(16,BOF,CBLT,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnlla", BBOCB(16,BOF,CBLT,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "ble-", BBOCB(16,BOF,CBGT,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "ble+", BBOCB(16,BOF,CBGT,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "ble", BBOCB(16,BOF,CBGT,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "blel-", BBOCB(16,BOF,CBGT,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "blel+", BBOCB(16,BOF,CBGT,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "blel", BBOCB(16,BOF,CBGT,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "blea-", BBOCB(16,BOF,CBGT,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "blea+", BBOCB(16,BOF,CBGT,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "blea", BBOCB(16,BOF,CBGT,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "blela-", BBOCB(16,BOF,CBGT,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "blela+", BBOCB(16,BOF,CBGT,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "blela", BBOCB(16,BOF,CBGT,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bng-", BBOCB(16,BOF,CBGT,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bng+", BBOCB(16,BOF,CBGT,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bng", BBOCB(16,BOF,CBGT,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bngl-", BBOCB(16,BOF,CBGT,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bngl+", BBOCB(16,BOF,CBGT,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bngl", BBOCB(16,BOF,CBGT,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnga-", BBOCB(16,BOF,CBGT,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnga+", BBOCB(16,BOF,CBGT,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnga", BBOCB(16,BOF,CBGT,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bngla-", BBOCB(16,BOF,CBGT,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bngla+", BBOCB(16,BOF,CBGT,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bngla", BBOCB(16,BOF,CBGT,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bne-", BBOCB(16,BOF,CBEQ,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bne+", BBOCB(16,BOF,CBEQ,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bne", BBOCB(16,BOF,CBEQ,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bnel", BBOCB(16,BOF,CBEQ,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnea", BBOCB(16,BOF,CBEQ,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnela", BBOCB(16,BOF,CBEQ,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bns-", BBOCB(16,BOF,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bns+", BBOCB(16,BOF,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bns", BBOCB(16,BOF,CBSO,0,0), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bnsl", BBOCB(16,BOF,CBSO,0,1), BBOYCB_MASK, PPC|POWER, { CR, BD } },
+{ "bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnsa", BBOCB(16,BOF,CBSO,1,0), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnsla", BBOCB(16,BOF,CBSO,1,1), BBOYCB_MASK, PPC|POWER, { CR, BDA } },
+{ "bnu-", BBOCB(16,BOF,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bnu+", BBOCB(16,BOF,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bnu", BBOCB(16,BOF,CBSO,0,0), BBOYCB_MASK, PPC, { CR, BD } },
+{ "bnul-", BBOCB(16,BOF,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDM } },
+{ "bnul+", BBOCB(16,BOF,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BDP } },
+{ "bnul", BBOCB(16,BOF,CBSO,0,1), BBOYCB_MASK, PPC, { CR, BD } },
+{ "bnua-", BBOCB(16,BOF,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnua+", BBOCB(16,BOF,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnua", BBOCB(16,BOF,CBSO,1,0), BBOYCB_MASK, PPC, { CR, BDA } },
+{ "bnula-", BBOCB(16,BOF,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDMA } },
+{ "bnula+", BBOCB(16,BOF,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDPA } },
+{ "bnula", BBOCB(16,BOF,CBSO,1,1), BBOYCB_MASK, PPC, { CR, BDA } },
+{ "bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPC, { BI, BD } },
+{ "bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPC, { BI, BD } },
+{ "bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPC, { BI, BDA } },
+{ "bdnztla-",BBO(16,BODNZT,1,1), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdnztla+",BBO(16,BODNZT,1,1), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPC, { BI, BDA } },
+{ "bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPC, { BI, BD } },
+{ "bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPC, { BI, BD } },
+{ "bdnzfa-", BBO(16,BODNZF,1,0), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPC, { BI, BDA } },
+{ "bdnzfla-",BBO(16,BODNZF,1,1), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdnzfla+",BBO(16,BODNZF,1,1), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPC, { BI, BDA } },
+{ "bt-", BBO(16,BOT,0,0), BBOY_MASK, PPC, { BI, BDM } },
+{ "bt+", BBO(16,BOT,0,0), BBOY_MASK, PPC, { BI, BDP } },
+{ "bt", BBO(16,BOT,0,0), BBOY_MASK, PPC, { BI, BD } },
+{ "bbt", BBO(16,BOT,0,0), BBOY_MASK, POWER, { BI, BD } },
+{ "btl-", BBO(16,BOT,0,1), BBOY_MASK, PPC, { BI, BDM } },
+{ "btl+", BBO(16,BOT,0,1), BBOY_MASK, PPC, { BI, BDP } },
+{ "btl", BBO(16,BOT,0,1), BBOY_MASK, PPC, { BI, BD } },
+{ "bbtl", BBO(16,BOT,0,1), BBOY_MASK, POWER, { BI, BD } },
+{ "bta-", BBO(16,BOT,1,0), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bta+", BBO(16,BOT,1,0), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bta", BBO(16,BOT,1,0), BBOY_MASK, PPC, { BI, BDA } },
+{ "bbta", BBO(16,BOT,1,0), BBOY_MASK, POWER, { BI, BDA } },
+{ "btla-", BBO(16,BOT,1,1), BBOY_MASK, PPC, { BI, BDMA } },
+{ "btla+", BBO(16,BOT,1,1), BBOY_MASK, PPC, { BI, BDPA } },
+{ "btla", BBO(16,BOT,1,1), BBOY_MASK, PPC, { BI, BDA } },
+{ "bbtla", BBO(16,BOT,1,1), BBOY_MASK, POWER, { BI, BDA } },
+{ "bf-", BBO(16,BOF,0,0), BBOY_MASK, PPC, { BI, BDM } },
+{ "bf+", BBO(16,BOF,0,0), BBOY_MASK, PPC, { BI, BDP } },
+{ "bf", BBO(16,BOF,0,0), BBOY_MASK, PPC, { BI, BD } },
+{ "bbf", BBO(16,BOF,0,0), BBOY_MASK, POWER, { BI, BD } },
+{ "bfl-", BBO(16,BOF,0,1), BBOY_MASK, PPC, { BI, BDM } },
+{ "bfl+", BBO(16,BOF,0,1), BBOY_MASK, PPC, { BI, BDP } },
+{ "bfl", BBO(16,BOF,0,1), BBOY_MASK, PPC, { BI, BD } },
+{ "bbfl", BBO(16,BOF,0,1), BBOY_MASK, POWER, { BI, BD } },
+{ "bfa-", BBO(16,BOF,1,0), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bfa+", BBO(16,BOF,1,0), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bfa", BBO(16,BOF,1,0), BBOY_MASK, PPC, { BI, BDA } },
+{ "bbfa", BBO(16,BOF,1,0), BBOY_MASK, POWER, { BI, BDA } },
+{ "bfla-", BBO(16,BOF,1,1), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bfla+", BBO(16,BOF,1,1), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bfla", BBO(16,BOF,1,1), BBOY_MASK, PPC, { BI, BDA } },
+{ "bbfla", BBO(16,BOF,1,1), BBOY_MASK, POWER, { BI, BDA } },
+{ "bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPC, { BI, BD } },
+{ "bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPC, { BI, BD } },
+{ "bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPC, { BI, BDA } },
+{ "bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPC, { BI, BDA } },
+{ "bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPC, { BI, BD } },
+{ "bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, PPC, { BI, BDM } },
+{ "bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, PPC, { BI, BDP } },
+{ "bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPC, { BI, BD } },
+{ "bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPC, { BI, BDA } },
+{ "bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, PPC, { BI, BDMA } },
+{ "bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, PPC, { BI, BDPA } },
+{ "bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPC, { BI, BDA } },
+{ "bc-", B(16,0,0), B_MASK, PPC, { BOE, BI, BDM } },
+{ "bc+", B(16,0,0), B_MASK, PPC, { BOE, BI, BDP } },
+{ "bc", B(16,0,0), B_MASK, PPC|POWER, { BO, BI, BD } },
+{ "bcl-", B(16,0,1), B_MASK, PPC, { BOE, BI, BDM } },
+{ "bcl+", B(16,0,1), B_MASK, PPC, { BOE, BI, BDP } },
+{ "bcl", B(16,0,1), B_MASK, PPC|POWER, { BO, BI, BD } },
+{ "bca-", B(16,1,0), B_MASK, PPC, { BOE, BI, BDMA } },
+{ "bca+", B(16,1,0), B_MASK, PPC, { BOE, BI, BDPA } },
+{ "bca", B(16,1,0), B_MASK, PPC|POWER, { BO, BI, BDA } },
+{ "bcla-", B(16,1,1), B_MASK, PPC, { BOE, BI, BDMA } },
+{ "bcla+", B(16,1,1), B_MASK, PPC, { BOE, BI, BDPA } },
+{ "bcla", B(16,1,1), B_MASK, PPC|POWER, { BO, BI, BDA } },
+
+{ "sc", SC(17,1,0), 0xffffffff, PPC, { 0 } },
+{ "svc", SC(17,0,0), SC_MASK, POWER, { LEV, FL1, FL2 } },
+{ "svcl", SC(17,0,1), SC_MASK, POWER, { LEV, FL1, FL2 } },
+{ "svca", SC(17,1,0), SC_MASK, POWER, { SV } },
+{ "svcla", SC(17,1,1), SC_MASK, POWER, { SV } },
+
+{ "b", B(18,0,0), B_MASK, PPC|POWER, { LI } },
+{ "bl", B(18,0,1), B_MASK, PPC|POWER, { LI } },
+{ "ba", B(18,1,0), B_MASK, PPC|POWER, { LIA } },
+{ "bla", B(18,1,1), B_MASK, PPC|POWER, { LIA } },
+
+{ "mcrf", XL(19,0), XLBB_MASK|(3<<21)|(3<<16), PPC|POWER, { BF, BFA } },
+
+{ "blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "br", XLO(19,BOU,16,0), XLBOBIBB_MASK, POWER, { 0 } },
+{ "blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, POWER, { 0 } },
+{ "bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdnzlr-", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdnzlrl-",XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdnzlrl+",XLO(19,BODNZP,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdzlr-", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, PPC, { 0 } },
+{ "bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnllrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnslr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, POWER, { CR } },
+{ "bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, POWER, { BI } },
+{ "btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, POWER, { BI } },
+{ "bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, POWER, { BI } },
+{ "bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, POWER, { BI } },
+{ "bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdnztlr-",XLO(19,BODNZT,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdnztlr+",XLO(19,BODNZTP,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdnztlrl",XLO(19,BODNZT,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdnztlrl-",XLO(19,BODNZT,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdnztlrl+",XLO(19,BODNZTP,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdnzflr-",XLO(19,BODNZF,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdnzflr+",XLO(19,BODNZFP,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdnzflrl",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdztlr", XLO(19,BODZT,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdztlrl", XLO(19,BODZT,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdztlrl-",XLO(19,BODZT,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdztlrl+",XLO(19,BODZTP,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdzflr-", XLO(19,BODZF,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, PPC, { BI } },
+{ "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, PPC, { BI } },
+{ "bclr", XLLK(19,16,0), XLYBB_MASK, PPC, { BO, BI } },
+{ "bclrl", XLLK(19,16,1), XLYBB_MASK, PPC, { BO, BI } },
+{ "bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bcr", XLLK(19,16,0), XLBB_MASK, POWER, { BO, BI } },
+{ "bcrl", XLLK(19,16,1), XLBB_MASK, POWER, { BO, BI } },
+
+{ "crnot", XL(19,33), XL_MASK, PPC, { BT, BA, BBA } },
+{ "crnor", XL(19,33), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "rfi", XL(19,50), 0xffffffff, PPC|POWER, { 0 } },
+{ "rfci", XL(19,51), 0xffffffff, PPC, { 0 } },
+
+{ "rfsvc", XL(19,82), 0xffffffff, POWER, { 0 } },
+
+{ "crandc", XL(19,129), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "isync", XL(19,150), 0xffffffff, PPC, { 0 } },
+{ "ics", XL(19,150), 0xffffffff, POWER, { 0 } },
+
+{ "crclr", XL(19,193), XL_MASK, PPC, { BT, BAT, BBA } },
+{ "crxor", XL(19,193), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "crnand", XL(19,225), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "crand", XL(19,257), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "crset", XL(19,289), XL_MASK, PPC, { BT, BAT, BBA } },
+{ "creqv", XL(19,289), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "crorc", XL(19,417), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "crmove", XL(19,449), XL_MASK, PPC, { BT, BA, BBA } },
+{ "cror", XL(19,449), XL_MASK, PPC|POWER, { BT, BA, BB } },
+
+{ "bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, PPC|POWER, { 0 } },
+{ "bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, PPC|POWER, { 0 } },
+{ "bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtctr-", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsoctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "blectr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, PPC, { CR } },
+{ "btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPC, { BI } },
+{ "btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, PPC, { BI } },
+{ "btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, PPC, { BI } },
+{ "btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPC, { BI } },
+{ "btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, PPC, { BI } },
+{ "btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, PPC, { BI } },
+{ "bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPC, { BI } },
+{ "bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, PPC, { BI } },
+{ "bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, PPC, { BI } },
+{ "bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPC, { BI } },
+{ "bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, PPC, { BI } },
+{ "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, PPC, { BI } },
+{ "bcctr", XLLK(19,528,0), XLYBB_MASK, PPC, { BO, BI } },
+{ "bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bcctrl", XLLK(19,528,1), XLYBB_MASK, PPC, { BO, BI } },
+{ "bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPC, { BOE, BI } },
+{ "bcc", XLLK(19,528,0), XLBB_MASK, POWER, { BO, BI } },
+{ "bccl", XLLK(19,528,1), XLBB_MASK, POWER, { BO, BI } },
+
+{ "rlwimi", M(20,0), M_MASK, PPC, { RA,RS,SH,MBE,ME } },
+{ "rlimi", M(20,0), M_MASK, POWER, { RA,RS,SH,MBE,ME } },
+
+{ "rlwimi.", M(20,1), M_MASK, PPC, { RA,RS,SH,MBE,ME } },
+{ "rlimi.", M(20,1), M_MASK, POWER, { RA,RS,SH,MBE,ME } },
+
+{ "rotlwi", MME(21,31,0), MMBME_MASK, PPC, { RA, RS, SH } },
+{ "clrlwi", MME(21,31,0), MSHME_MASK, PPC, { RA, RS, MB } },
+{ "rlwinm", M(21,0), M_MASK, PPC, { RA,RS,SH,MBE,ME } },
+{ "rlinm", M(21,0), M_MASK, POWER, { RA,RS,SH,MBE,ME } },
+{ "rotlwi.", MME(21,31,1), MMBME_MASK, PPC, { RA,RS,SH } },
+{ "clrlwi.", MME(21,31,1), MSHME_MASK, PPC, { RA, RS, MB } },
+{ "rlwinm.", M(21,1), M_MASK, PPC, { RA,RS,SH,MBE,ME } },
+{ "rlinm.", M(21,1), M_MASK, POWER, { RA,RS,SH,MBE,ME } },
+
+{ "rlmi", M(22,0), M_MASK, POWER|M601, { RA,RS,RB,MBE,ME } },
+{ "rlmi.", M(22,1), M_MASK, POWER|M601, { RA,RS,RB,MBE,ME } },
+
+{ "rotlw", MME(23,31,0), MMBME_MASK, PPC, { RA, RS, RB } },
+{ "rlwnm", M(23,0), M_MASK, PPC, { RA,RS,RB,MBE,ME } },
+{ "rlnm", M(23,0), M_MASK, POWER, { RA,RS,RB,MBE,ME } },
+{ "rotlw.", MME(23,31,1), MMBME_MASK, PPC, { RA, RS, RB } },
+{ "rlwnm.", M(23,1), M_MASK, PPC, { RA,RS,RB,MBE,ME } },
+{ "rlnm.", M(23,1), M_MASK, POWER, { RA,RS,RB,MBE,ME } },
+
+{ "nop", OP(24), 0xffffffff, PPC, { 0 } },
+{ "ori", OP(24), OP_MASK, PPC, { RA, RS, UI } },
+{ "oril", OP(24), OP_MASK, POWER, { RA, RS, UI } },
+
+{ "oris", OP(25), OP_MASK, PPC, { RA, RS, UI } },
+{ "oriu", OP(25), OP_MASK, POWER, { RA, RS, UI } },
+
+{ "xori", OP(26), OP_MASK, PPC, { RA, RS, UI } },
+{ "xoril", OP(26), OP_MASK, POWER, { RA, RS, UI } },
+
+{ "xoris", OP(27), OP_MASK, PPC, { RA, RS, UI } },
+{ "xoriu", OP(27), OP_MASK, POWER, { RA, RS, UI } },
+
+{ "andi.", OP(28), OP_MASK, PPC, { RA, RS, UI } },
+{ "andil.", OP(28), OP_MASK, POWER, { RA, RS, UI } },
+
+{ "andis.", OP(29), OP_MASK, PPC, { RA, RS, UI } },
+{ "andiu.", OP(29), OP_MASK, POWER, { RA, RS, UI } },
+
+{ "rotldi", MD(30,0,0), MDMB_MASK, PPC|B64, { RA, RS, SH6 } },
+{ "clrldi", MD(30,0,0), MDSH_MASK, PPC|B64, { RA, RS, MB6 } },
+{ "rldicl", MD(30,0,0), MD_MASK, PPC|B64, { RA, RS, SH6, MB6 } },
+{ "rotldi.", MD(30,0,1), MDMB_MASK, PPC|B64, { RA, RS, SH6 } },
+{ "clrldi.", MD(30,0,1), MDSH_MASK, PPC|B64, { RA, RS, MB6 } },
+{ "rldicl.", MD(30,0,1), MD_MASK, PPC|B64, { RA, RS, SH6, MB6 } },
+
+{ "rldicr", MD(30,1,0), MD_MASK, PPC|B64, { RA, RS, SH6, ME6 } },
+{ "rldicr.", MD(30,1,1), MD_MASK, PPC|B64, { RA, RS, SH6, ME6 } },
+
+{ "rldic", MD(30,2,0), MD_MASK, PPC|B64, { RA, RS, SH6, MB6 } },
+{ "rldic.", MD(30,2,1), MD_MASK, PPC|B64, { RA, RS, SH6, MB6 } },
+
+{ "rldimi", MD(30,3,0), MD_MASK, PPC|B64, { RA, RS, SH6, MB6 } },
+{ "rldimi.", MD(30,3,1), MD_MASK, PPC|B64, { RA, RS, SH6, MB6 } },
+
+{ "rotld", MDS(30,8,0), MDSMB_MASK, PPC|B64, { RA, RS, RB } },
+{ "rldcl", MDS(30,8,0), MDS_MASK, PPC|B64, { RA, RS, RB, MB6 } },
+{ "rotld.", MDS(30,8,1), MDSMB_MASK, PPC|B64, { RA, RS, RB } },
+{ "rldcl.", MDS(30,8,1), MDS_MASK, PPC|B64, { RA, RS, RB, MB6 } },
+
+{ "rldcr", MDS(30,9,0), MDS_MASK, PPC|B64, { RA, RS, RB, ME6 } },
+{ "rldcr.", MDS(30,9,1), MDS_MASK, PPC|B64, { RA, RS, RB, ME6 } },
+
+{ "cmpw", XCMPL(31,0,0), XCMPL_MASK, PPC, { OBF, RA, RB } },
+{ "cmpd", XCMPL(31,0,1), XCMPL_MASK, PPC|B64, { OBF, RA, RB } },
+{ "cmp", X(31,0), XCMP_MASK, PPC, { BF, L, RA, RB } },
+{ "cmp", X(31,0), XCMPL_MASK, POWER, { BF, RA, RB } },
+
+{ "twlgt", XTO(31,4,TOLGT), XTO_MASK, PPC, { RA, RB } },
+{ "tlgt", XTO(31,4,TOLGT), XTO_MASK, POWER, { RA, RB } },
+{ "twllt", XTO(31,4,TOLLT), XTO_MASK, PPC, { RA, RB } },
+{ "tllt", XTO(31,4,TOLLT), XTO_MASK, POWER, { RA, RB } },
+{ "tweq", XTO(31,4,TOEQ), XTO_MASK, PPC, { RA, RB } },
+{ "teq", XTO(31,4,TOEQ), XTO_MASK, POWER, { RA, RB } },
+{ "twlge", XTO(31,4,TOLGE), XTO_MASK, PPC, { RA, RB } },
+{ "tlge", XTO(31,4,TOLGE), XTO_MASK, POWER, { RA, RB } },
+{ "twlnl", XTO(31,4,TOLNL), XTO_MASK, PPC, { RA, RB } },
+{ "tlnl", XTO(31,4,TOLNL), XTO_MASK, POWER, { RA, RB } },
+{ "twlle", XTO(31,4,TOLLE), XTO_MASK, PPC, { RA, RB } },
+{ "tlle", XTO(31,4,TOLLE), XTO_MASK, POWER, { RA, RB } },
+{ "twlng", XTO(31,4,TOLNG), XTO_MASK, PPC, { RA, RB } },
+{ "tlng", XTO(31,4,TOLNG), XTO_MASK, POWER, { RA, RB } },
+{ "twgt", XTO(31,4,TOGT), XTO_MASK, PPC, { RA, RB } },
+{ "tgt", XTO(31,4,TOGT), XTO_MASK, POWER, { RA, RB } },
+{ "twge", XTO(31,4,TOGE), XTO_MASK, PPC, { RA, RB } },
+{ "tge", XTO(31,4,TOGE), XTO_MASK, POWER, { RA, RB } },
+{ "twnl", XTO(31,4,TONL), XTO_MASK, PPC, { RA, RB } },
+{ "tnl", XTO(31,4,TONL), XTO_MASK, POWER, { RA, RB } },
+{ "twlt", XTO(31,4,TOLT), XTO_MASK, PPC, { RA, RB } },
+{ "tlt", XTO(31,4,TOLT), XTO_MASK, POWER, { RA, RB } },
+{ "twle", XTO(31,4,TOLE), XTO_MASK, PPC, { RA, RB } },
+{ "tle", XTO(31,4,TOLE), XTO_MASK, POWER, { RA, RB } },
+{ "twng", XTO(31,4,TONG), XTO_MASK, PPC, { RA, RB } },
+{ "tng", XTO(31,4,TONG), XTO_MASK, POWER, { RA, RB } },
+{ "twne", XTO(31,4,TONE), XTO_MASK, PPC, { RA, RB } },
+{ "tne", XTO(31,4,TONE), XTO_MASK, POWER, { RA, RB } },
+{ "trap", XTO(31,4,TOU), 0xffffffff, PPC, { 0 } },
+{ "tw", X(31,4), X_MASK, PPC, { TO, RA, RB } },
+{ "t", X(31,4), X_MASK, POWER, { TO, RA, RB } },
+
+{ "subfc", XO(31,8,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "sf", XO(31,8,0,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "subc", XO(31,8,0,0), XO_MASK, PPC, { RT, RB, RA } },
+{ "subfc.", XO(31,8,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "sf.", XO(31,8,0,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "subc.", XO(31,8,0,1), XO_MASK, PPC, { RT, RB, RA } },
+{ "subfco", XO(31,8,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "sfo", XO(31,8,1,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "subco", XO(31,8,1,0), XO_MASK, PPC, { RT, RB, RA } },
+{ "subfco.", XO(31,8,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "sfo.", XO(31,8,1,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "subco.", XO(31,8,1,1), XO_MASK, PPC, { RT, RB, RA } },
+
+{ "mulhdu", XO(31,9,0,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "mulhdu.", XO(31,9,0,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "addc", XO(31,10,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "a", XO(31,10,0,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "addc.", XO(31,10,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "a.", XO(31,10,0,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "addco", XO(31,10,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "ao", XO(31,10,1,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "addco.", XO(31,10,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "ao.", XO(31,10,1,1), XO_MASK, POWER, { RT, RA, RB } },
+
+{ "mulhwu", XO(31,11,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "mulhwu.", XO(31,11,0,1), XO_MASK, PPC, { RT, RA, RB } },
+
+{ "mfcr", X(31,19), XRARB_MASK, POWER|PPC, { RT } },
+
+{ "lwarx", X(31,20), X_MASK, PPC, { RT, RA, RB } },
+
+{ "ldx", X(31,21), X_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "lwzx", X(31,23), X_MASK, PPC, { RT, RA, RB } },
+{ "lx", X(31,23), X_MASK, POWER, { RT, RA, RB } },
+
+{ "slw", XRC(31,24,0), X_MASK, PPC, { RA, RS, RB } },
+{ "sl", XRC(31,24,0), X_MASK, POWER, { RA, RS, RB } },
+{ "slw.", XRC(31,24,1), X_MASK, PPC, { RA, RS, RB } },
+{ "sl.", XRC(31,24,1), X_MASK, POWER, { RA, RS, RB } },
+
+{ "cntlzw", XRC(31,26,0), XRB_MASK, PPC, { RA, RS } },
+{ "cntlz", XRC(31,26,0), XRB_MASK, POWER, { RA, RS } },
+{ "cntlzw.", XRC(31,26,1), XRB_MASK, PPC, { RA, RS } },
+{ "cntlz.", XRC(31,26,1), XRB_MASK, POWER, { RA, RS } },
+
+{ "sld", XRC(31,27,0), X_MASK, PPC|B64, { RA, RS, RB } },
+{ "sld.", XRC(31,27,1), X_MASK, PPC|B64, { RA, RS, RB } },
+
+{ "and", XRC(31,28,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "and.", XRC(31,28,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "maskg", XRC(31,29,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "maskg.", XRC(31,29,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "cmplw", XCMPL(31,32,0), XCMPL_MASK, PPC, { OBF, RA, RB } },
+{ "cmpld", XCMPL(31,32,1), XCMPL_MASK, PPC|B64, { OBF, RA, RB } },
+{ "cmpl", X(31,32), XCMP_MASK, PPC, { BF, L, RA, RB } },
+{ "cmpl", X(31,32), XCMPL_MASK, POWER, { BF, RA, RB } },
+
+{ "subf", XO(31,40,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "sub", XO(31,40,0,0), XO_MASK, PPC, { RT, RB, RA } },
+{ "subf.", XO(31,40,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "sub.", XO(31,40,0,1), XO_MASK, PPC, { RT, RB, RA } },
+{ "subfo", XO(31,40,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "subo", XO(31,40,1,0), XO_MASK, PPC, { RT, RB, RA } },
+{ "subfo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "subo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RB, RA } },
+
+{ "ldux", X(31,53), X_MASK, PPC|B64, { RT, RAL, RB } },
+
+{ "dcbst", X(31,54), XRT_MASK, PPC, { RA, RB } },
+
+{ "lwzux", X(31,55), X_MASK, PPC, { RT, RAL, RB } },
+{ "lux", X(31,55), X_MASK, POWER, { RT, RA, RB } },
+
+{ "cntlzd", XRC(31,58,0), XRB_MASK, PPC|B64, { RA, RS } },
+{ "cntlzd.", XRC(31,58,1), XRB_MASK, PPC|B64, { RA, RS } },
+
+{ "andc", XRC(31,60,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "andc.", XRC(31,60,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdgt", XTO(31,68,TOGT), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdge", XTO(31,68,TOGE), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdnl", XTO(31,68,TONL), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdlt", XTO(31,68,TOLT), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdle", XTO(31,68,TOLE), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdng", XTO(31,68,TONG), XTO_MASK, PPC|B64, { RA, RB } },
+{ "tdne", XTO(31,68,TONE), XTO_MASK, PPC|B64, { RA, RB } },
+{ "td", X(31,68), X_MASK, PPC|B64, { TO, RA, RB } },
+
+{ "mulhd", XO(31,73,0,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "mulhd.", XO(31,73,0,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "mulhw", XO(31,75,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "mulhw.", XO(31,75,0,1), XO_MASK, PPC, { RT, RA, RB } },
+
+{ "mfmsr", X(31,83), XRARB_MASK, PPC|POWER, { RT } },
+
+{ "ldarx", X(31,84), X_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "dcbf", X(31,86), XRT_MASK, PPC, { RA, RB } },
+
+{ "lbzx", X(31,87), X_MASK, PPC|POWER, { RT, RA, RB } },
+
+{ "neg", XO(31,104,0,0), XORB_MASK, PPC|POWER, { RT, RA } },
+{ "neg.", XO(31,104,0,1), XORB_MASK, PPC|POWER, { RT, RA } },
+{ "nego", XO(31,104,1,0), XORB_MASK, PPC|POWER, { RT, RA } },
+{ "nego.", XO(31,104,1,1), XORB_MASK, PPC|POWER, { RT, RA } },
+
+{ "mul", XO(31,107,0,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "mul.", XO(31,107,0,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "mulo", XO(31,107,1,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "mulo.", XO(31,107,1,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+
+{ "clf", X(31,118), XRB_MASK, POWER, { RT, RA } },
+
+{ "lbzux", X(31,119), X_MASK, PPC|POWER, { RT, RAL, RB } },
+
+{ "not", XRC(31,124,0), X_MASK, PPC|POWER, { RA, RS, RBS } },
+{ "nor", XRC(31,124,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "not.", XRC(31,124,1), X_MASK, PPC|POWER, { RA, RS, RBS } },
+{ "nor.", XRC(31,124,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "subfe", XO(31,136,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "sfe", XO(31,136,0,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "subfe.", XO(31,136,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "sfe.", XO(31,136,0,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "subfeo", XO(31,136,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "sfeo", XO(31,136,1,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "subfeo.", XO(31,136,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "sfeo.", XO(31,136,1,1), XO_MASK, POWER, { RT, RA, RB } },
+
+{ "adde", XO(31,138,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "ae", XO(31,138,0,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "adde.", XO(31,138,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "ae.", XO(31,138,0,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "addeo", XO(31,138,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "aeo", XO(31,138,1,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "addeo.", XO(31,138,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "aeo.", XO(31,138,1,1), XO_MASK, POWER, { RT, RA, RB } },
+
+{ "mtcr", XFXM(31,144,0xff), XFXFXM_MASK|FXM_MASK, PPC|POWER, { RS }},
+{ "mtcrf", X(31,144), XFXFXM_MASK, PPC|POWER, { FXM, RS } },
+
+{ "mtmsr", X(31,146), XRARB_MASK, PPC|POWER, { RS } },
+{ "mtmsrd", X(31,178), XRARB_MASK, PPC|POWER, { RS } },
+
+{ "stdx", X(31,149), X_MASK, PPC|B64, { RS, RA, RB } },
+
+{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA, RB } },
+
+{ "stwx", X(31,151), X_MASK, PPC, { RS, RA, RB } },
+{ "stx", X(31,151), X_MASK, POWER, { RS, RA, RB } },
+
+{ "slq", XRC(31,152,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "slq.", XRC(31,152,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "sle", XRC(31,153,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "sle.", XRC(31,153,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "stdux", X(31,181), X_MASK, PPC|B64, { RS, RAS, RB } },
+
+{ "stwux", X(31,183), X_MASK, PPC, { RS, RAS, RB } },
+{ "stux", X(31,183), X_MASK, POWER, { RS, RA, RB } },
+
+{ "sliq", XRC(31,184,0), X_MASK, POWER|M601, { RA, RS, SH } },
+{ "sliq.", XRC(31,184,1), X_MASK, POWER|M601, { RA, RS, SH } },
+
+{ "subfze", XO(31,200,0,0), XORB_MASK, PPC, { RT, RA } },
+{ "sfze", XO(31,200,0,0), XORB_MASK, POWER, { RT, RA } },
+{ "subfze.", XO(31,200,0,1), XORB_MASK, PPC, { RT, RA } },
+{ "sfze.", XO(31,200,0,1), XORB_MASK, POWER, { RT, RA } },
+{ "subfzeo", XO(31,200,1,0), XORB_MASK, PPC, { RT, RA } },
+{ "sfzeo", XO(31,200,1,0), XORB_MASK, POWER, { RT, RA } },
+{ "subfzeo.",XO(31,200,1,1), XORB_MASK, PPC, { RT, RA } },
+{ "sfzeo.", XO(31,200,1,1), XORB_MASK, POWER, { RT, RA } },
+
+{ "addze", XO(31,202,0,0), XORB_MASK, PPC, { RT, RA } },
+{ "aze", XO(31,202,0,0), XORB_MASK, POWER, { RT, RA } },
+{ "addze.", XO(31,202,0,1), XORB_MASK, PPC, { RT, RA } },
+{ "aze.", XO(31,202,0,1), XORB_MASK, POWER, { RT, RA } },
+{ "addzeo", XO(31,202,1,0), XORB_MASK, PPC, { RT, RA } },
+{ "azeo", XO(31,202,1,0), XORB_MASK, POWER, { RT, RA } },
+{ "addzeo.", XO(31,202,1,1), XORB_MASK, PPC, { RT, RA } },
+{ "azeo.", XO(31,202,1,1), XORB_MASK, POWER, { RT, RA } },
+
+{ "mtsr", X(31,210), XRB_MASK|(1<<20), PPC|POWER|B32, { SR, RS } },
+
+{ "stdcx.", XRC(31,214,1), X_MASK, PPC|B64, { RS, RA, RB } },
+
+{ "stbx", X(31,215), X_MASK, PPC|POWER, { RS, RA, RB } },
+
+{ "sllq", XRC(31,216,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "sllq.", XRC(31,216,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "sleq", XRC(31,217,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "sleq.", XRC(31,217,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "subfme", XO(31,232,0,0), XORB_MASK, PPC, { RT, RA } },
+{ "sfme", XO(31,232,0,0), XORB_MASK, POWER, { RT, RA } },
+{ "subfme.", XO(31,232,0,1), XORB_MASK, PPC, { RT, RA } },
+{ "sfme.", XO(31,232,0,1), XORB_MASK, POWER, { RT, RA } },
+{ "subfmeo", XO(31,232,1,0), XORB_MASK, PPC, { RT, RA } },
+{ "sfmeo", XO(31,232,1,0), XORB_MASK, POWER, { RT, RA } },
+{ "subfmeo.",XO(31,232,1,1), XORB_MASK, PPC, { RT, RA } },
+{ "sfmeo.", XO(31,232,1,1), XORB_MASK, POWER, { RT, RA } },
+
+{ "mulld", XO(31,233,0,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "mulld.", XO(31,233,0,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "mulldo", XO(31,233,1,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "mulldo.", XO(31,233,1,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "addme", XO(31,234,0,0), XORB_MASK, PPC, { RT, RA } },
+{ "ame", XO(31,234,0,0), XORB_MASK, POWER, { RT, RA } },
+{ "addme.", XO(31,234,0,1), XORB_MASK, PPC, { RT, RA } },
+{ "ame.", XO(31,234,0,1), XORB_MASK, POWER, { RT, RA } },
+{ "addmeo", XO(31,234,1,0), XORB_MASK, PPC, { RT, RA } },
+{ "ameo", XO(31,234,1,0), XORB_MASK, POWER, { RT, RA } },
+{ "addmeo.", XO(31,234,1,1), XORB_MASK, PPC, { RT, RA } },
+{ "ameo.", XO(31,234,1,1), XORB_MASK, POWER, { RT, RA } },
+
+{ "mullw", XO(31,235,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "muls", XO(31,235,0,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "mullw.", XO(31,235,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "muls.", XO(31,235,0,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "mullwo", XO(31,235,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "mulso", XO(31,235,1,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "mullwo.", XO(31,235,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "mulso.", XO(31,235,1,1), XO_MASK, POWER, { RT, RA, RB } },
+
+{ "mtsrin", X(31,242), XRA_MASK, PPC|B32, { RS, RB } },
+{ "mtsri", X(31,242), XRA_MASK, POWER|B32, { RS, RB } },
+
+{ "dcbtst", X(31,246), XRT_MASK, PPC, { RA, RB } },
+
+{ "stbux", X(31,247), X_MASK, PPC|POWER, { RS, RAS, RB } },
+
+{ "slliq", XRC(31,248,0), X_MASK, POWER|M601, { RA, RS, SH } },
+{ "slliq.", XRC(31,248,1), X_MASK, POWER|M601, { RA, RS, SH } },
+
+{ "doz", XO(31,264,0,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "doz.", XO(31,264,0,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "dozo", XO(31,264,1,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "dozo.", XO(31,264,1,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+
+{ "add", XO(31,266,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "cax", XO(31,266,0,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "add.", XO(31,266,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "cax.", XO(31,266,0,1), XO_MASK, POWER, { RT, RA, RB } },
+{ "addo", XO(31,266,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "caxo", XO(31,266,1,0), XO_MASK, POWER, { RT, RA, RB } },
+{ "addo.", XO(31,266,1,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "caxo.", XO(31,266,1,1), XO_MASK, POWER, { RT, RA, RB } },
+
+{ "lscbx", XRC(31,277,0), X_MASK, POWER|M601, { RT, RA, RB } },
+{ "lscbx.", XRC(31,277,1), X_MASK, POWER|M601, { RT, RA, RB } },
+
+{ "dcbt", X(31,278), XRT_MASK, PPC, { RA, RB } },
+
+{ "lhzx", X(31,279), X_MASK, PPC|POWER, { RT, RA, RB } },
+
+{ "icbt", X(31,262), XRT_MASK, PPC, { RA, RB } },
+
+{ "eqv", XRC(31,284,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "eqv.", XRC(31,284,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "tlbie", X(31,306), XRTRA_MASK, PPC, { RB } },
+{ "tlbi", X(31,306), XRTRA_MASK, POWER, { RB } },
+
+{ "eciwx", X(31,310), X_MASK, PPC, { RT, RA, RB } },
+
+{ "lhzux", X(31,311), X_MASK, PPC|POWER, { RT, RAL, RB } },
+
+{ "xor", XRC(31,316,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "xor.", XRC(31,316,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "mfdcr", X(31,323), X_MASK, PPC, { RT, SPR } },
+
+{ "div", XO(31,331,0,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "div.", XO(31,331,0,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "divo", XO(31,331,1,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "divo.", XO(31,331,1,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+
+{ "mfmq", XSPR(31,339,0), XSPR_MASK, POWER|M601, { RT } },
+{ "mfxer", XSPR(31,339,1), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfrtcu", XSPR(31,339,4), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfrtcl", XSPR(31,339,5), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfdec", XSPR(31,339,6), XSPR_MASK, POWER|M601, { RT } },
+{ "mflr", XSPR(31,339,8), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfctr", XSPR(31,339,9), XSPR_MASK, PPC|POWER, { RT } },
+{ "mftid", XSPR(31,339,17), XSPR_MASK, POWER, { RT } },
+{ "mfdsisr", XSPR(31,339,18), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfdar", XSPR(31,339,19), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfdec", XSPR(31,339,22), XSPR_MASK, PPC, { RT } },
+{ "mfsdr0", XSPR(31,339,24), XSPR_MASK, POWER, { RT } },
+{ "mfsdr1", XSPR(31,339,25), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfsrr0", XSPR(31,339,26), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfsrr1", XSPR(31,339,27), XSPR_MASK, PPC|POWER, { RT } },
+{ "mfsprg", XSPR(31,339,272), XSPRG_MASK, PPC, { RT, SPRG } },
+{ "mfasr", XSPR(31,339,280), XSPR_MASK, PPC|B64, { RT } },
+{ "mfear", XSPR(31,339,282), XSPR_MASK, PPC, { RT } },
+{ "mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, { RT } },
+{ "mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
+{ "mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
+{ "mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
+{ "mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
+{ "mfspr", X(31,339), X_MASK, PPC|POWER, { RT, SPR } },
+
+{ "lwax", X(31,341), X_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "lhax", X(31,343), X_MASK, PPC|POWER, { RT, RA, RB } },
+
+{ "dccci", X(31,454), XRT_MASK, PPC, { RA, RB } },
+
+{ "abs", XO(31,360,0,0), XORB_MASK, POWER|M601, { RT, RA } },
+{ "abs.", XO(31,360,0,1), XORB_MASK, POWER|M601, { RT, RA } },
+{ "abso", XO(31,360,1,0), XORB_MASK, POWER|M601, { RT, RA } },
+{ "abso.", XO(31,360,1,1), XORB_MASK, POWER|M601, { RT, RA } },
+
+{ "divs", XO(31,363,0,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "divs.", XO(31,363,0,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "divso", XO(31,363,1,0), XO_MASK, POWER|M601, { RT, RA, RB } },
+{ "divso.", XO(31,363,1,1), XO_MASK, POWER|M601, { RT, RA, RB } },
+
+{ "tlbia", X(31,370), 0xffffffff, PPC, { 0 } },
+
+{ "mftbu", XSPR(31,371,269), XSPR_MASK, PPC, { RT } },
+{ "mftb", X(31,371), X_MASK, PPC, { RT, TBR } },
+
+{ "lwaux", X(31,373), X_MASK, PPC|B64, { RT, RAL, RB } },
+
+{ "lhaux", X(31,375), X_MASK, PPC|POWER, { RT, RAL, RB } },
+
+{ "sthx", X(31,407), X_MASK, PPC|POWER, { RS, RA, RB } },
+
+{ "lfqx", X(31,791), X_MASK, POWER2, { FRT, RA, RB } },
+
+{ "lfqux", X(31,823), X_MASK, POWER2, { FRT, RA, RB } },
+
+{ "stfqx", X(31,919), X_MASK, POWER2, { FRS, RA, RB } },
+
+{ "stfqux", X(31,951), X_MASK, POWER2, { FRS, RA, RB } },
+
+{ "orc", XRC(31,412,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "orc.", XRC(31,412,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "sradi", XS(31,413,0), XS_MASK, PPC|B64, { RA, RS, SH6 } },
+{ "sradi.", XS(31,413,1), XS_MASK, PPC|B64, { RA, RS, SH6 } },
+
+{ "slbie", X(31,434), XRTRA_MASK, PPC|B64, { RB } },
+
+{ "ecowx", X(31,438), X_MASK, PPC, { RT, RA, RB } },
+
+{ "sthux", X(31,439), X_MASK, PPC|POWER, { RS, RAS, RB } },
+
+{ "mr", XRC(31,444,0), X_MASK, PPC|POWER, { RA, RS, RBS } },
+{ "or", XRC(31,444,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "mr.", XRC(31,444,1), X_MASK, PPC|POWER, { RA, RS, RBS } },
+{ "or.", XRC(31,444,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "mtdcr", X(31,451), X_MASK, PPC, { SPR, RS } },
+
+{ "divdu", XO(31,457,0,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "divdu.", XO(31,457,0,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "divduo", XO(31,457,1,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "divduo.", XO(31,457,1,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "divwu", XO(31,459,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "divwu.", XO(31,459,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "divwuo", XO(31,459,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "divwuo.", XO(31,459,1,1), XO_MASK, PPC, { RT, RA, RB } },
+
+{ "mtmq", XSPR(31,467,0), XSPR_MASK, POWER|M601, { RS } },
+{ "mtxer", XSPR(31,467,1), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtlr", XSPR(31,467,8), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtctr", XSPR(31,467,9), XSPR_MASK, PPC|POWER, { RS } },
+{ "mttid", XSPR(31,467,17), XSPR_MASK, POWER, { RS } },
+{ "mtdsisr", XSPR(31,467,18), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtdar", XSPR(31,467,19), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtrtcu", XSPR(31,467,20), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtrtcl", XSPR(31,467,21), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtdec", XSPR(31,467,22), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtsdr0", XSPR(31,467,24), XSPR_MASK, POWER, { RS } },
+{ "mtsdr1", XSPR(31,467,25), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtsrr0", XSPR(31,467,26), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtsrr1", XSPR(31,467,27), XSPR_MASK, PPC|POWER, { RS } },
+{ "mtsprg", XSPR(31,467,272), XSPRG_MASK, PPC, { SPRG, RS } },
+{ "mtasr", XSPR(31,467,280), XSPR_MASK, PPC|B64, { RS } },
+{ "mtear", XSPR(31,467,282), XSPR_MASK, PPC, { RS } },
+{ "mttbl", XSPR(31,467,284), XSPR_MASK, PPC, { RS } },
+{ "mttbu", XSPR(31,467,285), XSPR_MASK, PPC, { RS } },
+{ "mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
+{ "mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
+{ "mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
+{ "mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
+{ "mtspr", X(31,467), X_MASK, PPC|POWER, { SPR, RS } },
+
+{ "dcbi", X(31,470), XRT_MASK, PPC, { RA, RB } },
+
+{ "nand", XRC(31,476,0), X_MASK, PPC|POWER, { RA, RS, RB } },
+{ "nand.", XRC(31,476,1), X_MASK, PPC|POWER, { RA, RS, RB } },
+
+{ "nabs", XO(31,488,0,0), XORB_MASK, POWER|M601, { RT, RA } },
+{ "nabs.", XO(31,488,0,1), XORB_MASK, POWER|M601, { RT, RA } },
+{ "nabso", XO(31,488,1,0), XORB_MASK, POWER|M601, { RT, RA } },
+{ "nabso.", XO(31,488,1,1), XORB_MASK, POWER|M601, { RT, RA } },
+
+{ "divd", XO(31,489,0,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "divd.", XO(31,489,0,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "divdo", XO(31,489,1,0), XO_MASK, PPC|B64, { RT, RA, RB } },
+{ "divdo.", XO(31,489,1,1), XO_MASK, PPC|B64, { RT, RA, RB } },
+
+{ "divw", XO(31,491,0,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "divw.", XO(31,491,0,1), XO_MASK, PPC, { RT, RA, RB } },
+{ "divwo", XO(31,491,1,0), XO_MASK, PPC, { RT, RA, RB } },
+{ "divwo.", XO(31,491,1,1), XO_MASK, PPC, { RT, RA, RB } },
+
+{ "slbia", X(31,498), 0xffffffff, PPC|B64, { 0 } },
+
+{ "cli", X(31,502), XRB_MASK, POWER, { RT, RA } },
+
+{ "mcrxr", X(31,512), XRARB_MASK|(3<<21), PPC|POWER, { BF } },
+
+{ "clcs", X(31,531), XRB_MASK, POWER|M601, { RT, RA } },
+
+{ "lswx", X(31,533), X_MASK, PPC, { RT, RA, RB } },
+{ "lsx", X(31,533), X_MASK, POWER, { RT, RA, RB } },
+
+{ "lwbrx", X(31,534), X_MASK, PPC, { RT, RA, RB } },
+{ "lbrx", X(31,534), X_MASK, POWER, { RT, RA, RB } },
+
+{ "lfsx", X(31,535), X_MASK, PPC|POWER, { FRT, RA, RB } },
+
+{ "srw", XRC(31,536,0), X_MASK, PPC, { RA, RS, RB } },
+{ "sr", XRC(31,536,0), X_MASK, POWER, { RA, RS, RB } },
+{ "srw.", XRC(31,536,1), X_MASK, PPC, { RA, RS, RB } },
+{ "sr.", XRC(31,536,1), X_MASK, POWER, { RA, RS, RB } },
+
+{ "rrib", XRC(31,537,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "rrib.", XRC(31,537,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "srd", XRC(31,539,0), X_MASK, PPC|B64, { RA, RS, RB } },
+{ "srd.", XRC(31,539,1), X_MASK, PPC|B64, { RA, RS, RB } },
+
+{ "maskir", XRC(31,541,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "maskir.", XRC(31,541,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "tlbsync", X(31,566), 0xffffffff, PPC, { 0 } },
+
+{ "lfsux", X(31,567), X_MASK, PPC|POWER, { FRT, RAS, RB } },
+
+{ "mfsr", X(31,595), XRB_MASK|(1<<20), PPC|POWER|B32, { RT, SR } },
+
+{ "lswi", X(31,597), X_MASK, PPC, { RT, RA, NB } },
+{ "lsi", X(31,597), X_MASK, POWER, { RT, RA, NB } },
+
+{ "sync", X(31,598), 0xffffffff, PPC, { 0 } },
+{ "dcs", X(31,598), 0xffffffff, POWER, { 0 } },
+
+{ "lfdx", X(31,599), X_MASK, PPC|POWER, { FRT, RA, RB } },
+
+{ "mfsri", X(31,627), X_MASK, POWER, { RT, RA, RB } },
+
+{ "dclst", X(31,630), XRB_MASK, POWER, { RS, RA } },
+
+{ "lfdux", X(31,631), X_MASK, PPC|POWER, { FRT, RAS, RB } },
+
+{ "mfsrin", X(31,659), XRA_MASK, PPC|B32, { RT, RB } },
+
+{ "stswx", X(31,661), X_MASK, PPC, { RS, RA, RB } },
+{ "stsx", X(31,661), X_MASK, POWER, { RS, RA, RB } },
+
+{ "stwbrx", X(31,662), X_MASK, PPC, { RS, RA, RB } },
+{ "stbrx", X(31,662), X_MASK, POWER, { RS, RA, RB } },
+
+{ "stfsx", X(31,663), X_MASK, PPC|POWER, { FRS, RA, RB } },
+
+{ "srq", XRC(31,664,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "srq.", XRC(31,664,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "sre", XRC(31,665,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "sre.", XRC(31,665,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "stfsux", X(31,695), X_MASK, PPC|POWER, { FRS, RAS, RB } },
+
+{ "sriq", XRC(31,696,0), X_MASK, POWER|M601, { RA, RS, SH } },
+{ "sriq.", XRC(31,696,1), X_MASK, POWER|M601, { RA, RS, SH } },
+
+{ "stswi", X(31,725), X_MASK, PPC, { RS, RA, NB } },
+{ "stsi", X(31,725), X_MASK, POWER, { RS, RA, NB } },
+
+{ "stfdx", X(31,727), X_MASK, PPC|POWER, { FRS, RA, RB } },
+
+{ "srlq", XRC(31,728,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "srlq.", XRC(31,728,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "sreq", XRC(31,729,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "sreq.", XRC(31,729,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "stfdux", X(31,759), X_MASK, PPC|POWER, { FRS, RAS, RB } },
+
+{ "srliq", XRC(31,760,0), X_MASK, POWER|M601, { RA, RS, SH } },
+{ "srliq.", XRC(31,760,1), X_MASK, POWER|M601, { RA, RS, SH } },
+
+{ "lhbrx", X(31,790), X_MASK, PPC|POWER, { RT, RA, RB } },
+
+{ "sraw", XRC(31,792,0), X_MASK, PPC, { RA, RS, RB } },
+{ "sra", XRC(31,792,0), X_MASK, POWER, { RA, RS, RB } },
+{ "sraw.", XRC(31,792,1), X_MASK, PPC, { RA, RS, RB } },
+{ "sra.", XRC(31,792,1), X_MASK, POWER, { RA, RS, RB } },
+
+{ "srad", XRC(31,794,0), X_MASK, PPC|B64, { RA, RS, RB } },
+{ "srad.", XRC(31,794,1), X_MASK, PPC|B64, { RA, RS, RB } },
+
+{ "rac", X(31,818), X_MASK, POWER, { RT, RA, RB } },
+
+{ "srawi", XRC(31,824,0), X_MASK, PPC, { RA, RS, SH } },
+{ "srai", XRC(31,824,0), X_MASK, POWER, { RA, RS, SH } },
+{ "srawi.", XRC(31,824,1), X_MASK, PPC, { RA, RS, SH } },
+{ "srai.", XRC(31,824,1), X_MASK, POWER, { RA, RS, SH } },
+
+{ "eieio", X(31,854), 0xffffffff, PPC, { 0 } },
+
+{ "sthbrx", X(31,918), X_MASK, PPC|POWER, { RS, RA, RB } },
+
+{ "sraq", XRC(31,920,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "sraq.", XRC(31,920,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "srea", XRC(31,921,0), X_MASK, POWER|M601, { RA, RS, RB } },
+{ "srea.", XRC(31,921,1), X_MASK, POWER|M601, { RA, RS, RB } },
+
+{ "extsh", XRC(31,922,0), XRB_MASK, PPC, { RA, RS } },
+{ "exts", XRC(31,922,0), XRB_MASK, POWER, { RA, RS } },
+{ "extsh.", XRC(31,922,1), XRB_MASK, PPC, { RA, RS } },
+{ "exts.", XRC(31,922,1), XRB_MASK, POWER, { RA, RS } },
+
+{ "sraiq", XRC(31,952,0), X_MASK, POWER|M601, { RA, RS, SH } },
+{ "sraiq.", XRC(31,952,1), X_MASK, POWER|M601, { RA, RS, SH } },
+
+{ "extsb", XRC(31,954,0), XRB_MASK, PPC, { RA, RS} },
+{ "extsb.", XRC(31,954,1), XRB_MASK, PPC, { RA, RS} },
+
+{ "iccci", X(31,966), XRT_MASK, PPC, { RA, RB } },
+
+{ "icbi", X(31,982), XRT_MASK, PPC, { RA, RB } },
+
+{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA, RB } },
+
+{ "extsw", XRC(31,986,0), XRB_MASK, PPC, { RA, RS } },
+{ "extsw.", XRC(31,986,1), XRB_MASK, PPC, { RA, RS } },
+
+{ "dcbz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
+{ "dclz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
+
+{ "lwz", OP(32), OP_MASK, PPC, { RT, D, RA } },
+{ "l", OP(32), OP_MASK, POWER, { RT, D, RA } },
+
+{ "lwzu", OP(33), OP_MASK, PPC, { RT, D, RAL } },
+{ "lu", OP(33), OP_MASK, POWER, { RT, D, RA } },
+
+{ "lbz", OP(34), OP_MASK, PPC|POWER, { RT, D, RA } },
+
+{ "lbzu", OP(35), OP_MASK, PPC|POWER, { RT, D, RAL } },
+
+{ "stw", OP(36), OP_MASK, PPC, { RS, D, RA } },
+{ "st", OP(36), OP_MASK, POWER, { RS, D, RA } },
+
+{ "stwu", OP(37), OP_MASK, PPC, { RS, D, RAS } },
+{ "stu", OP(37), OP_MASK, POWER, { RS, D, RA } },
+
+{ "stb", OP(38), OP_MASK, PPC|POWER, { RS, D, RA } },
+
+{ "stbu", OP(39), OP_MASK, PPC|POWER, { RS, D, RAS } },
+
+{ "lhz", OP(40), OP_MASK, PPC|POWER, { RT, D, RA } },
+
+{ "lhzu", OP(41), OP_MASK, PPC|POWER, { RT, D, RAL } },
+
+{ "lha", OP(42), OP_MASK, PPC|POWER, { RT, D, RA } },
+
+{ "lhau", OP(43), OP_MASK, PPC|POWER, { RT, D, RAL } },
+
+{ "sth", OP(44), OP_MASK, PPC|POWER, { RS, D, RA } },
+
+{ "sthu", OP(45), OP_MASK, PPC|POWER, { RS, D, RAS } },
+
+{ "lmw", OP(46), OP_MASK, PPC, { RT, D, RAM } },
+{ "lm", OP(46), OP_MASK, POWER, { RT, D, RA } },
+
+{ "stmw", OP(47), OP_MASK, PPC, { RS, D, RA } },
+{ "stm", OP(47), OP_MASK, POWER, { RS, D, RA } },
+
+{ "lfs", OP(48), OP_MASK, PPC|POWER, { FRT, D, RA } },
+
+{ "lfsu", OP(49), OP_MASK, PPC|POWER, { FRT, D, RAS } },
+
+{ "lfd", OP(50), OP_MASK, PPC|POWER, { FRT, D, RA } },
+
+{ "lfdu", OP(51), OP_MASK, PPC|POWER, { FRT, D, RAS } },
+
+{ "stfs", OP(52), OP_MASK, PPC|POWER, { FRS, D, RA } },
+
+{ "stfsu", OP(53), OP_MASK, PPC|POWER, { FRS, D, RAS } },
+
+{ "stfd", OP(54), OP_MASK, PPC|POWER, { FRS, D, RA } },
+
+{ "stfdu", OP(55), OP_MASK, PPC|POWER, { FRS, D, RAS } },
+
+{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA } },
+
+{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA } },
+
+{ "ld", DSO(58,0), DS_MASK, PPC|B64, { RT, DS, RA } },
+
+{ "ldu", DSO(58,1), DS_MASK, PPC|B64, { RT, DS, RAL } },
+
+{ "lwa", DSO(58,2), DS_MASK, PPC|B64, { RT, DS, RA } },
+
+{ "fdivs", A(59,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fdivs.", A(59,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+
+{ "fsubs", A(59,20,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fsubs.", A(59,20,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+
+{ "fadds", A(59,21,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fadds.", A(59,21,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+
+{ "fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
+{ "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
+
+{ "fres", A(59,24,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
+{ "fres.", A(59,24,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
+
+{ "fmuls", A(59,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } },
+{ "fmuls.", A(59,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } },
+
+{ "fmsubs", A(59,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fmsubs.", A(59,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+
+{ "fmadds", A(59,29,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fmadds.", A(59,29,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+
+{ "fnmsubs", A(59,30,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fnmsubs.",A(59,30,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+
+{ "fnmadds", A(59,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fnmadds.",A(59,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+
+{ "stfq", OP(60), OP_MASK, POWER2, { FRS, D, RA } },
+
+{ "stfqu", OP(61), OP_MASK, POWER2, { FRS, D, RA } },
+
+{ "std", DSO(62,0), DS_MASK, PPC|B64, { RS, DS, RA } },
+
+{ "stdu", DSO(62,1), DS_MASK, PPC|B64, { RS, DS, RAS } },
+
+{ "fcmpu", X(63,0), X_MASK|(3<<21), PPC|POWER, { BF, FRA, FRB } },
+
+{ "frsp", XRC(63,12,0), XRA_MASK, PPC|POWER, { FRT, FRB } },
+{ "frsp.", XRC(63,12,1), XRA_MASK, PPC|POWER, { FRT, FRB } },
+
+{ "fctiw", XRC(63,14,0), XRA_MASK, PPC, { FRT, FRB } },
+{ "fcir", XRC(63,14,0), XRA_MASK, POWER2, { FRT, FRB } },
+{ "fctiw.", XRC(63,14,1), XRA_MASK, PPC, { FRT, FRB } },
+{ "fcir.", XRC(63,14,1), XRA_MASK, POWER2, { FRT, FRB } },
+
+{ "fctiwz", XRC(63,15,0), XRA_MASK, PPC, { FRT, FRB } },
+{ "fcirz", XRC(63,15,0), XRA_MASK, POWER2, { FRT, FRB } },
+{ "fctiwz.", XRC(63,15,1), XRA_MASK, PPC, { FRT, FRB } },
+{ "fcirz.", XRC(63,15,1), XRA_MASK, POWER2, { FRT, FRB } },
+
+{ "fdiv", A(63,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fd", A(63,18,0), AFRC_MASK, POWER, { FRT, FRA, FRB } },
+{ "fdiv.", A(63,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fd.", A(63,18,1), AFRC_MASK, POWER, { FRT, FRA, FRB } },
+
+{ "fsub", A(63,20,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fs", A(63,20,0), AFRC_MASK, POWER, { FRT, FRA, FRB } },
+{ "fsub.", A(63,20,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fs.", A(63,20,1), AFRC_MASK, POWER, { FRT, FRA, FRB } },
+
+{ "fadd", A(63,21,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fa", A(63,21,0), AFRC_MASK, POWER, { FRT, FRA, FRB } },
+{ "fadd.", A(63,21,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
+{ "fa.", A(63,21,1), AFRC_MASK, POWER, { FRT, FRA, FRB } },
+
+{ "fsqrt", A(63,22,0), AFRAFRC_MASK, PPC|POWER2, { FRT, FRB } },
+{ "fsqrt.", A(63,22,1), AFRAFRC_MASK, PPC|POWER2, { FRT, FRB } },
+
+{ "fsel", A(63,23,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fsel.", A(63,23,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+
+{ "fmul", A(63,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } },
+{ "fm", A(63,25,0), AFRB_MASK, POWER, { FRT, FRA, FRC } },
+{ "fmul.", A(63,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } },
+{ "fm.", A(63,25,1), AFRB_MASK, POWER, { FRT, FRA, FRC } },
+
+{ "frsqrte", A(63,26,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
+{ "frsqrte.",A(63,26,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
+
+{ "fmsub", A(63,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fms", A(63,28,0), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+{ "fmsub.", A(63,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fms.", A(63,28,1), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+
+{ "fmadd", A(63,29,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fma", A(63,29,0), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+{ "fmadd.", A(63,29,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fma.", A(63,29,1), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+
+{ "fnmsub", A(63,30,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fnms", A(63,30,0), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+{ "fnmsub.", A(63,30,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fnms.", A(63,30,1), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+
+{ "fnmadd", A(63,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fnma", A(63,31,0), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+{ "fnmadd.", A(63,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fnma.", A(63,31,1), A_MASK, POWER, { FRT,FRA,FRC,FRB } },
+
+{ "fcmpo", X(63,30), X_MASK|(3<<21), PPC|POWER, { BF, FRA, FRB } },
+
+{ "mtfsb1", XRC(63,38,0), XRARB_MASK, PPC|POWER, { BT } },
+{ "mtfsb1.", XRC(63,38,1), XRARB_MASK, PPC|POWER, { BT } },
+
+{ "fneg", XRC(63,40,0), XRA_MASK, PPC|POWER, { FRT, FRB } },
+{ "fneg.", XRC(63,40,1), XRA_MASK, PPC|POWER, { FRT, FRB } },
+
+{ "mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), PPC|POWER, { BF, BFA } },
+
+{ "mtfsb0", XRC(63,70,0), XRARB_MASK, PPC|POWER, { BT } },
+{ "mtfsb0.", XRC(63,70,1), XRARB_MASK, PPC|POWER, { BT } },
+
+{ "fmr", XRC(63,72,0), XRA_MASK, PPC|POWER, { FRT, FRB } },
+{ "fmr.", XRC(63,72,1), XRA_MASK, PPC|POWER, { FRT, FRB } },
+
+{ "mtfsfi", XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), PPC|POWER, { BF, U } },
+{ "mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), PPC|POWER, { BF, U } },
+
+{ "fnabs", XRC(63,136,0), XRA_MASK, PPC|POWER, { FRT, FRB } },
+{ "fnabs.", XRC(63,136,1), XRA_MASK, PPC|POWER, { FRT, FRB } },
+
+{ "fabs", XRC(63,264,0), XRA_MASK, PPC|POWER, { FRT, FRB } },
+{ "fabs.", XRC(63,264,1), XRA_MASK, PPC|POWER, { FRT, FRB } },
+
+{ "mffs", XRC(63,583,0), XRARB_MASK, PPC|POWER, { FRT } },
+{ "mffs.", XRC(63,583,1), XRARB_MASK, PPC|POWER, { FRT } },
+
+{ "mtfsf", XFL(63,711,0), XFL_MASK, PPC|POWER, { FLM, FRB } },
+{ "mtfsf.", XFL(63,711,1), XFL_MASK, PPC|POWER, { FLM, FRB } },
+
+{ "fctid", XRC(63,814,0), XRA_MASK, PPC|B64, { FRT, FRB } },
+{ "fctid.", XRC(63,814,1), XRA_MASK, PPC|B64, { FRT, FRB } },
+
+{ "fctidz", XRC(63,815,0), XRA_MASK, PPC|B64, { FRT, FRB } },
+{ "fctidz.", XRC(63,815,1), XRA_MASK, PPC|B64, { FRT, FRB } },
+
+{ "fcfid", XRC(63,846,0), XRA_MASK, PPC|B64, { FRT, FRB } },
+{ "fcfid.", XRC(63,846,1), XRA_MASK, PPC|B64, { FRT, FRB } },
+
+};
+
+/* Number of entries in powerpc_opcodes[], computed at compile time so
+   the table can grow or shrink without touching this definition.  */
+const int powerpc_num_opcodes =
+ sizeof (powerpc_opcodes) / sizeof (powerpc_opcodes[0]);
+\f
+/* The macro table. This is only used by the assembler. */
+
+/* Each entry gives: assembler mnemonic, number of operands, availability
+   flags, and the format string the assembler expands it to; %0, %1, ...
+   in the format string are replaced by the macro's operands in order.
+   NOTE(review): entry order appears significant for lookup elsewhere —
+   keep additions grouped with their 64-bit/32-bit families.  */
+const struct powerpc_macro powerpc_macros[] = {
+/* 64-bit convenience mnemonics: extract/insert/rotate/shift/clear,
+   all expanding to the rldicl/rldicr/rldimi/rldic rotate family.  */
+{ "extldi", 4, PPC|B64, "rldicr %0,%1,%3,(%2)-1" },
+{ "extldi.", 4, PPC|B64, "rldicr. %0,%1,%3,(%2)-1" },
+{ "extrdi", 4, PPC|B64, "rldicl %0,%1,(%2)+(%3),64-(%2)" },
+{ "extrdi.", 4, PPC|B64, "rldicl. %0,%1,(%2)+(%3),64-(%2)" },
+{ "insrdi", 4, PPC|B64, "rldimi %0,%1,64-((%2)+(%3)),%3" },
+{ "insrdi.", 4, PPC|B64, "rldimi. %0,%1,64-((%2)+(%3)),%3" },
+{ "rotrdi", 3, PPC|B64, "rldicl %0,%1,64-(%2),0" },
+{ "rotrdi.", 3, PPC|B64, "rldicl. %0,%1,64-(%2),0" },
+{ "sldi", 3, PPC|B64, "rldicr %0,%1,%2,63-(%2)" },
+{ "sldi.", 3, PPC|B64, "rldicr. %0,%1,%2,63-(%2)" },
+{ "srdi", 3, PPC|B64, "rldicl %0,%1,64-(%2),%2" },
+{ "srdi.", 3, PPC|B64, "rldicl. %0,%1,64-(%2),%2" },
+{ "clrrdi", 3, PPC|B64, "rldicr %0,%1,0,63-(%2)" },
+{ "clrrdi.", 3, PPC|B64, "rldicr. %0,%1,0,63-(%2)" },
+{ "clrlsldi",4, PPC|B64, "rldic %0,%1,%3,(%2)-(%3)" },
+{ "clrlsldi.",4, PPC|B64, "rldic. %0,%1,%3,(%2)-(%3)" },
+
+/* 32-bit counterparts, expanding to rlwinm/rlwimi (PowerPC names)
+   or rlinm (old POWER names for sli/sri).  */
+{ "extlwi", 4, PPC, "rlwinm %0,%1,%3,0,(%2)-1" },
+{ "extlwi.", 4, PPC, "rlwinm. %0,%1,%3,0,(%2)-1" },
+{ "extrwi", 4, PPC, "rlwinm %0,%1,(%2)+(%3),32-(%2),31" },
+{ "extrwi.", 4, PPC, "rlwinm. %0,%1,(%2)+(%3),32-(%2),31" },
+{ "inslwi", 4, PPC, "rlwimi %0,%1,32-(%3),%3,(%2)+(%3)-1" },
+{ "inslwi.", 4, PPC, "rlwimi. %0,%1,32-(%3),%3,(%2)+(%3)-1" },
+{ "insrwi", 4, PPC, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1" },
+{ "insrwi.", 4, PPC, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"},
+{ "rotrwi", 3, PPC, "rlwinm %0,%1,32-(%2),0,31" },
+{ "rotrwi.", 3, PPC, "rlwinm. %0,%1,32-(%2),0,31" },
+{ "slwi", 3, PPC, "rlwinm %0,%1,%2,0,31-(%2)" },
+{ "sli", 3, POWER, "rlinm %0,%1,%2,0,31-(%2)" },
+{ "slwi.", 3, PPC, "rlwinm. %0,%1,%2,0,31-(%2)" },
+{ "sli.", 3, POWER, "rlinm. %0,%1,%2,0,31-(%2)" },
+{ "srwi", 3, PPC, "rlwinm %0,%1,32-(%2),%2,31" },
+{ "sri", 3, POWER, "rlinm %0,%1,32-(%2),%2,31" },
+{ "srwi.", 3, PPC, "rlwinm. %0,%1,32-(%2),%2,31" },
+{ "sri.", 3, POWER, "rlinm. %0,%1,32-(%2),%2,31" },
+{ "clrrwi", 3, PPC, "rlwinm %0,%1,0,0,31-(%2)" },
+{ "clrrwi.", 3, PPC, "rlwinm. %0,%1,0,0,31-(%2)" },
+{ "clrlslwi",4, PPC, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)" },
+{ "clrlslwi.",4, PPC, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)" },
+
+};
+
+/* Number of entries in powerpc_macros[], computed at compile time.  */
+const int powerpc_num_macros =
+ sizeof (powerpc_macros) / sizeof (powerpc_macros[0]);
--- /dev/null
+/* ppc.h -- Header file for PowerPC opcode table
+ Copyright 1994 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Cygnus Support
+
+This file is part of GDB, GAS, and the GNU binutils.
+
+GDB, GAS, and the GNU binutils are free software; you can redistribute
+them and/or modify them under the terms of the GNU General Public
+License as published by the Free Software Foundation; either version
+1, or (at your option) any later version.
+
+GDB, GAS, and the GNU binutils are distributed in the hope that they
+will be useful, but WITHOUT ANY WARRANTY; without even the implied
+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this file; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#ifndef PPC_H
+#define PPC_H
+
+/* The opcode table is an array of struct powerpc_opcode. */
+
+/* One row of the opcode table.  The disassembler matches an
+   instruction word against (word & mask) == opcode; the assembler uses
+   the same row to fill operand fields into `opcode'.  */
+struct powerpc_opcode
+{
+  /* The opcode name. */
+  const char *name;
+
+  /* The opcode itself. Those bits which will be filled in with
+     operands are zeroes. */
+  unsigned long opcode;
+
+  /* The opcode mask. This is used by the disassembler. This is a
+     mask containing ones indicating those bits which must match the
+     opcode field, and zeroes indicating those bits which need not
+     match (and are presumably filled in by operands). */
+  unsigned long mask;
+
+  /* One bit flags for the opcode. These are used to indicate which
+     specific processors support the instructions. The defined values
+     are listed below. */
+  unsigned long flags;
+
+  /* An array of operand codes. Each code is an index into the
+     operand table. They appear in the order which the operands must
+     appear in assembly code, and are terminated by a zero.
+     Eight slots: room for the operands plus the terminating zero.  */
+  unsigned char operands[8];
+};
+
+/* The table itself is sorted by major opcode number, and is otherwise
+ in the order in which the disassembler should consider
+ instructions. */
+extern const struct powerpc_opcode powerpc_opcodes[];
+extern const int powerpc_num_opcodes;
+
+/* Values defined for the flags field of a struct powerpc_opcode. */
+
+/* Opcode is defined for the PowerPC architecture. */
+#define PPC_OPCODE_PPC (01)
+
+/* Opcode is defined for the POWER (RS/6000) architecture. */
+#define PPC_OPCODE_POWER (02)
+
+/* Opcode is defined for the POWER2 (Rios 2) architecture. */
+#define PPC_OPCODE_POWER2 (04)
+
+/* Opcode is only defined on 32 bit architectures. */
+#define PPC_OPCODE_32 (010)
+
+/* Opcode is only defined on 64 bit architectures. */
+#define PPC_OPCODE_64 (020)
+
+/* Opcode is supported by the Motorola PowerPC 601 processor. The 601
+ is assumed to support all PowerPC (PPC_OPCODE_PPC) instructions,
+ but it also supports many additional POWER instructions. */
+#define PPC_OPCODE_601 (040)
+
+/* A macro to extract the major opcode from an instruction. */
+#define PPC_OP(i) (((i) >> 26) & 0x3f)
+\f
+/* The operands table is an array of struct powerpc_operand. */
+
+/* Describes how one operand type is encoded into, and decoded from,
+ an instruction word. NOTE(review): PARAMS() is the old pre-ANSI
+ prototype macro from binutils; assumed defined by the includer. */
+struct powerpc_operand
+{
+ /* The number of bits in the operand. */
+ int bits;
+
+ /* How far the operand is left shifted in the instruction. */
+ int shift;
+
+ /* Insertion function. This is used by the assembler. To insert an
+ operand value into an instruction, check this field.
+
+ If it is NULL, execute
+ i |= (op & ((1 << o->bits) - 1)) << o->shift;
+ (i is the instruction which we are filling in, o is a pointer to
+ this structure, and op is the opcode value; this assumes twos
+ complement arithmetic).
+
+ If this field is not NULL, then simply call it with the
+ instruction and the operand value. It will return the new value
+ of the instruction. If the ERRMSG argument is not NULL, then if
+ the operand value is illegal, *ERRMSG will be set to a warning
+ string (the operand will be inserted in any case). If the
+ operand value is legal, *ERRMSG will be unchanged (most operands
+ can accept any value). */
+ unsigned long (*insert) PARAMS ((unsigned long instruction, long op,
+ const char **errmsg));
+
+ /* Extraction function. This is used by the disassembler. To
+ extract this operand type from an instruction, check this field.
+
+ If it is NULL, compute
+ op = ((i) >> o->shift) & ((1 << o->bits) - 1);
+ if ((o->flags & PPC_OPERAND_SIGNED) != 0
+ && (op & (1 << (o->bits - 1))) != 0)
+ op -= 1 << o->bits;
+ (i is the instruction, o is a pointer to this structure, and op
+ is the result; this assumes twos complement arithmetic).
+
+ If this field is not NULL, then simply call it with the
+ instruction value. It will return the value of the operand. If
+ the INVALID argument is not NULL, *INVALID will be set to
+ non-zero if this operand type can not actually be extracted from
+ this operand (i.e., the instruction does not match). If the
+ operand is valid, *INVALID will not be changed. */
+ long (*extract) PARAMS ((unsigned long instruction, int *invalid));
+
+ /* One bit syntax flags. */
+ unsigned long flags;
+};
+
+/* Elements in the table are retrieved by indexing with values from
+ the operands field of the powerpc_opcodes table. */
+
+extern const struct powerpc_operand powerpc_operands[];
+
+/* Values defined for the flags field of a struct powerpc_operand. */
+
+/* This operand takes signed values. */
+#define PPC_OPERAND_SIGNED (01)
+
+/* This operand takes signed values, but also accepts a full positive
+ range of values when running in 32 bit mode. That is, if bits is
+ 16, it takes any value from -0x8000 to 0xffff. In 64 bit mode,
+ this flag is ignored. */
+#define PPC_OPERAND_SIGNOPT (02)
+
+/* This operand does not actually exist in the assembler input. This
+ is used to support extended mnemonics such as mr, for which two
+ operands fields are identical. The assembler should call the
+ insert function with any op value. The disassembler should call
+ the extract function, ignore the return value, and check the value
+ placed in the valid argument. */
+#define PPC_OPERAND_FAKE (04)
+
+/* The next operand should be wrapped in parentheses rather than
+ separated from this one by a comma. This is used for the load and
+ store instructions which want their operands to look like
+ reg,displacement(reg)
+ */
+#define PPC_OPERAND_PARENS (010)
+
+/* This operand may use the symbolic names for the CR fields, which
+ are
+ lt 0 gt 1 eq 2 so 3 un 3
+ cr0 0 cr1 1 cr2 2 cr3 3
+ cr4 4 cr5 5 cr6 6 cr7 7
+ These may be combined arithmetically, as in cr2*4+gt. These are
+ only supported on the PowerPC, not the POWER. */
+#define PPC_OPERAND_CR (020)
+
+/* This operand names a register. The disassembler uses this to print
+ register names with a leading 'r'. */
+#define PPC_OPERAND_GPR (040)
+
+/* This operand names a floating point register. The disassembler
+ prints these with a leading 'f'. */
+#define PPC_OPERAND_FPR (0100)
+
+/* This operand is a relative branch displacement. The disassembler
+ prints these symbolically if possible. */
+#define PPC_OPERAND_RELATIVE (0200)
+
+/* This operand is an absolute branch address. The disassembler
+ prints these symbolically if possible. */
+#define PPC_OPERAND_ABSOLUTE (0400)
+
+/* This operand is optional, and is zero if omitted. This is used for
+ the optional BF and L fields in the comparison instructions. The
+ assembler must count the number of operands remaining on the line,
+ and the number of operands remaining for the opcode, and decide
+ whether this operand is present or not. The disassembler should
+ print this operand out only if it is not zero. */
+#define PPC_OPERAND_OPTIONAL (01000)
+
+/* This flag is only used with PPC_OPERAND_OPTIONAL. If this operand
+ is omitted, then for the next operand use this operand value plus
+ 1, ignoring the next operand field for the opcode. This wretched
+ hack is needed because the Power rotate instructions can take
+ either 4 or 5 operands. The disassembler should print this operand
+ out regardless of the PPC_OPERAND_OPTIONAL field. */
+#define PPC_OPERAND_NEXT (02000)
+
+/* This operand should be regarded as a negative number for the
+ purposes of overflow checking (i.e., the normal most negative
+ number is disallowed and one more than the normal most positive
+ number is allowed). This flag will only be set for a signed
+ operand. */
+#define PPC_OPERAND_NEGATIVE (04000)
+\f
+/* The POWER and PowerPC assemblers use a few macros. We keep them
+ with the operands table for simplicity. The macro table is an
+ array of struct powerpc_macro. */
+
+/* One assembler macro: "name" taking a fixed number of operands,
+ rewritten into a real instruction via the format string. */
+struct powerpc_macro
+{
+ /* The macro name. */
+ const char *name;
+
+ /* The number of operands the macro takes. */
+ unsigned int operands;
+
+ /* One bit flags for the opcode. These are used to indicate which
+ specific processors support the instructions. The values are the
+ same as those for the struct powerpc_opcode flags field. */
+ unsigned long flags;
+
+ /* A format string to turn the macro into a normal instruction.
+ Each %N in the string is replaced with operand number N (zero
+ based). */
+ const char *format;
+};
+
+extern const struct powerpc_macro powerpc_macros[];
+extern const int powerpc_num_macros;
+
+#endif /* PPC_H */
--- /dev/null
+/*
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+
+/* Token-pasting generators for privileged-register accessors:
+ GETREG(x) defines get_x() around "mfx"; SETREG(x) defines set_x()
+ around "mtx". */
+#define GETREG(reg) \
+ static inline unsigned long get_ ## reg (void) \
+ { unsigned long ret; asm volatile ("mf" #reg " %0" : "=r" (ret) :); return ret; }
+
+#define SETREG(reg) \
+ static inline void set_ ## reg (unsigned long val) \
+ { asm volatile ("mt" #reg " %0" : : "r" (val)); }
+
+/* get_msr() uses mfmsr; SETREG(msrd) pastes to "mtmsrd", the 64-bit
+ MSR move, giving set_msrd(). */
+GETREG(msr)
+SETREG(msrd)
+GETREG(cr)
+
+/* GSETSPR(n, name) defines the pair get_name()/set_name() around
+ mfspr/mtspr with SPR number n. */
+#define GSETSPR(n, name) \
+ static inline long get_ ## name (void) \
+ { long ret; asm volatile ("mfspr %0," #n : "=r" (ret) : ); return ret; } \
+ static inline void set_ ## name (long val) \
+ { asm volatile ("mtspr " #n ",%0" : : "r" (val)); }
+
+GSETSPR(0, mq)
+GSETSPR(1, xer)
+GSETSPR(4, rtcu)
+GSETSPR(5, rtcl)
+GSETSPR(8, lr)
+GSETSPR(9, ctr)
+GSETSPR(18, dsisr)
+GSETSPR(19, dar)
+GSETSPR(22, dec)
+GSETSPR(25, sdr1)
+GSETSPR(26, srr0)
+GSETSPR(27, srr1)
+GSETSPR(272, sprg0)
+GSETSPR(273, sprg1)
+GSETSPR(274, sprg2)
+GSETSPR(275, sprg3)
+GSETSPR(282, ear)
+GSETSPR(287, pvr)
+GSETSPR(528, bat0u)
+GSETSPR(529, bat0l)
+GSETSPR(530, bat1u)
+GSETSPR(531, bat1l)
+GSETSPR(532, bat2u)
+GSETSPR(533, bat2l)
+GSETSPR(534, bat3u)
+GSETSPR(535, bat3l)
+GSETSPR(1008, hid0)
+GSETSPR(1009, hid1)
+GSETSPR(1010, iabr)
+GSETSPR(1013, dabr)
+GSETSPR(1023, pir)
+
+/* Read segment register n. The mfsrin read below is compiled out
+ (it does not assemble with this toolchain), so without an explicit
+ initializer the function returned an uninitialized value; return 0
+ deterministically instead until the asm is re-enabled. */
+static inline int get_sr(int n)
+{
+ int ret = 0;
+
+#if 0
+ // DRENG does not assemble
+ asm (" mfsrin %0,%1" : "=r" (ret) : "r" (n << 28));
+#endif
+ return ret;
+}
+
+/* Write segment register n -- currently a no-op because the mtsrin
+ form below does not assemble with this toolchain. */
+static inline void set_sr(int n, int val)
+{
+#if 0
+ // DRENG does not assemble
+ asm ("mtsrin %0,%1" : : "r" (val), "r" (n << 28));
+#endif
+}
+
+/* After patching an instruction at p: flush it from the data cache to
+ memory and invalidate the icache line (dcbst/sync/icbi/isync) so the
+ new instruction is fetched. */
+static inline void store_inst(void *p)
+{
+ asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
+}
+
+/* Flush (write back, dcbf) the data cache line and invalidate the
+ icache line containing p. */
+static inline void cflush(void *p)
+{
+ asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
+}
+
+/* Invalidate without writeback (dcbi) the data cache line and the
+ icache line containing p -- any dirty data in the line is lost. */
+static inline void cinval(void *p)
+{
+ asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
+}
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NB this file must be compiled with -O2.
+ */
+
+/* Minimal setjmp for xmon. buf layout (8-byte slots): 0 = LR,
+ 8 = r1 (stack ptr), 16 = r2 (TOC), 24 = CR, 32.. = r13-r31;
+ 184 bytes used in total, hence the size note below.
+ Always returns 0 here; xmon_longjmp re-enters the caller with a
+ non-zero value in r3. */
+int
+xmon_setjmp(long *buf) /* NOTE: assert(sizeof(buf) > 184) */
+{
+ /* XXX should save fp regs as well */
+ asm volatile (
+ "mflr 0; std 0,0(%0)\n\
+ std 1,8(%0)\n\
+ std 2,16(%0)\n\
+ mfcr 0; std 0,24(%0)\n\
+ std 13,32(%0)\n\
+ std 14,40(%0)\n\
+ std 15,48(%0)\n\
+ std 16,56(%0)\n\
+ std 17,64(%0)\n\
+ std 18,72(%0)\n\
+ std 19,80(%0)\n\
+ std 20,88(%0)\n\
+ std 21,96(%0)\n\
+ std 22,104(%0)\n\
+ std 23,112(%0)\n\
+ std 24,120(%0)\n\
+ std 25,128(%0)\n\
+ std 26,136(%0)\n\
+ std 27,144(%0)\n\
+ std 28,152(%0)\n\
+ std 29,160(%0)\n\
+ std 30,168(%0)\n\
+ std 31,176(%0)\n\
+ " : : "r" (buf));
+ return 0;
+}
+
+/* Counterpart to xmon_setjmp: restore r13-r31, CR fields selected by
+ mask 0x38, r1, r2 and LR from buf, then place val (forced non-zero)
+ in r3. NOTE(review): there is no explicit branch in the asm -- the
+ function's normal return goes through the restored LR and stack,
+ landing back at xmon_setjmp's call site; confirm the compiler
+ epilogue does not clobber r3. */
+void
+xmon_longjmp(long *buf, int val)
+{
+ if (val == 0)
+ val = 1;
+ asm volatile (
+ "ld 13,32(%0)\n\
+ ld 14,40(%0)\n\
+ ld 15,48(%0)\n\
+ ld 16,56(%0)\n\
+ ld 17,64(%0)\n\
+ ld 18,72(%0)\n\
+ ld 19,80(%0)\n\
+ ld 20,88(%0)\n\
+ ld 21,96(%0)\n\
+ ld 22,104(%0)\n\
+ ld 23,112(%0)\n\
+ ld 24,120(%0)\n\
+ ld 25,128(%0)\n\
+ ld 26,136(%0)\n\
+ ld 27,144(%0)\n\
+ ld 28,152(%0)\n\
+ ld 29,160(%0)\n\
+ ld 30,168(%0)\n\
+ ld 31,176(%0)\n\
+ ld 0,24(%0)\n\
+ mtcrf 0x38,0\n\
+ ld 0,0(%0)\n\
+ ld 1,8(%0)\n\
+ ld 2,16(%0)\n\
+ mtlr 0\n\
+ mr 3,%1\n\
+ " : : "r" (buf), "r" (val));
+}
--- /dev/null
+/*
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/pmu.h>
+#include <linux/kernel.h>
+#include <linux/sysrq.h>
+#include <asm/prom.h>
+#include <asm/processor.h>
+
+/* Transition to udbg isn't quite done yet...but very close. */
+#define USE_UDBG
+#ifdef USE_UDBG
+#include <asm/udbg.h>
+#endif
+
+#ifndef USE_UDBG
+static volatile unsigned char *sccc, *sccd;
+#endif
+unsigned long TXRDY, RXRDY;
+extern void xmon_printf(const char *fmt, ...);
+static int xmon_expect(const char *str, unsigned int timeout);
+
+#ifndef USE_UDBG
+static int console = 0;
+#endif
+static int via_modem = 0;
+/* static int xmon_use_sccb = 0; --Unused */
+
+#define TB_SPEED 25000000
+
+extern void *comport1;
+/* Read the timebase (mftb), truncated to the low 32 bits; used only
+ for relative timeout arithmetic in xmon_expect(). */
+static inline unsigned int readtb(void)
+{
+ unsigned int ret;
+
+ asm volatile("mftb %0" : "=r" (ret) :);
+ return ret;
+}
+
+#ifndef USE_UDBG
+/* Clear the DLAB (divisor latch) bit in the UART line control register
+ so that sccd[0]/sccd[1] address the data/IER registers again. */
+void buf_access(void)
+{
+ sccd[3] &= ~0x80; /* reset DLAB */
+}
+#endif
+
+/* Magic-SysRq 'x' callback: drop into xmon with the interrupted regs.
+ The key/kbd/tty arguments required by the sysrq interface are unused. */
+static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs, struct kbd_struct *kbd, struct tty_struct *tty)
+{
+ xmon(pt_regs);
+}
+/* sysrq registration record; hooked up in xmon_map_scc(). */
+static struct sysrq_key_op sysrq_xmon_op =
+{
+ handler: sysrq_handle_xmon,
+ help_msg: "xmon",
+ action_msg: "Entering xmon\n",
+};
+
+/* One-time wiring: register the SysRq 'x' hook and, in the non-udbg
+ build, point sccd/sccc at the serial port data (base) and line status
+ (base+5) registers, with the TX-empty/RX-ready status masks. */
+void
+xmon_map_scc(void)
+{
+ /* This maybe isn't the best place to register sysrq 'x' */
+ __sysrq_put_key_op('x', &sysrq_xmon_op);
+#ifndef USE_UDBG
+ /* should already be mapped by the kernel boot */
+ sccd = (volatile unsigned char *) (((unsigned long)comport1));
+ sccc = (volatile unsigned char *) (((unsigned long)comport1)+5);
+ TXRDY = 0x20;
+ RXRDY = 1;
+#endif
+}
+
+static int scc_initialized = 0;
+
+void xmon_init_scc(void);
+extern void pmu_poll(void);
+
+/* Write nb bytes from ptr to the xmon console; returns the count
+ consumed. With USE_UDBG this is a straight udbg_write(). Otherwise
+ busy-wait on the UART TX-ready bit and emit bytes directly, expanding
+ '\n' to '\r\n' (ct marks that the '\r' for the current '\n' has just
+ been sent, and --i replays the '\n' on the next pass). */
+int
+xmon_write(void *handle, void *ptr, int nb)
+{
+#ifdef USE_UDBG
+ return udbg_write(ptr, nb);
+#else
+ char *p = ptr;
+ int i, c, ct;
+
+ if (!scc_initialized)
+ xmon_init_scc();
+ ct = 0;
+ for (i = 0; i < nb; ++i) {
+ while ((*sccc & TXRDY) == 0) {
+ }
+ c = p[i];
+ if (c == '\n' && !ct) {
+ c = '\r';
+ ct = 1;
+ --i;
+ } else {
+ if (console)
+ printk("%c", c);
+ ct = 0;
+ }
+ buf_access();
+ *sccd = c;
+ }
+ return i;
+#endif
+}
+
+int xmon_wants_key;
+
+/* Read exactly nb bytes into ptr, busy-waiting on the UART RX-ready
+ bit; XON/XOFF flow-control bytes (0x11/0x13) are silently discarded
+ in the non-udbg path. Returns the number of bytes read. */
+int
+xmon_read(void *handle, void *ptr, int nb)
+{
+#ifdef USE_UDBG
+ return udbg_read(ptr, nb);
+#else
+ char *p = ptr;
+ int i, c;
+
+ if (!scc_initialized)
+ xmon_init_scc();
+ for (i = 0; i < nb; ++i) {
+ do {
+ while ((*sccc & RXRDY) == 0)
+ ;
+ buf_access();
+ c = *sccd;
+ } while (c == 0x11 || c == 0x13);
+ *p++ = c;
+ }
+ return i;
+#endif
+}
+
+/* Non-blocking read: return the next input character, or -1 if none
+ is pending. */
+int
+xmon_read_poll(void)
+{
+#ifdef USE_UDBG
+ return udbg_getc_poll();
+#else
+ if ((*sccc & RXRDY) == 0) {
+ return -1;
+ }
+ buf_access();
+ return *sccd;
+#endif
+}
+
+/* Program the UART to 9600 baud, 8N1, FIFOs and interrupts off (see
+ per-line register comments), then mark the port initialized. In
+ via_modem mode, loop issuing AT commands until a call is answered
+ and CONNECT is seen, escaping back to command mode with "+++" on
+ failure. */
+void
+xmon_init_scc()
+{
+#ifndef USE_UDBG
+ sccd[3] = 0x83; eieio(); /* LCR = 8N1 + DLAB */
+ sccd[0] = 12; eieio(); /* DLL = 9600 baud */
+ sccd[1] = 0; eieio();
+ sccd[2] = 0; eieio(); /* FCR = 0 */
+ sccd[3] = 3; eieio(); /* LCR = 8N1 */
+ sccd[1] = 0; eieio(); /* IER = 0 */
+#endif
+
+ scc_initialized = 1;
+ if (via_modem) {
+ for (;;) {
+ xmon_write(0, "ATE1V1\r", 7);
+ if (xmon_expect("OK", 5)) {
+ xmon_write(0, "ATA\r", 4);
+ if (xmon_expect("CONNECT", 40))
+ break;
+ }
+ xmon_write(0, "+++", 3);
+ xmon_expect("OK", 3);
+ }
+ }
+}
+
+void *xmon_stdin;
+void *xmon_stdout;
+void *xmon_stderr;
+
+/* No global setup is required currently; kept as an empty hook. */
+void
+xmon_init(void)
+{
+}
+
+/* Write one character to stream f, expanding '\n' to '\r\n' (the '\r'
+ is sent first via recursion). Returns c on success, -1 on error. */
+int
+xmon_putc(int c, void *f)
+{
+ char ch = c;
+
+ if (c == '\n')
+ xmon_putc('\r', f);
+ return xmon_write(f, &ch, 1) == 1? c: -1;
+}
+
+/* putchar() equivalent on xmon_stdout. */
+int
+xmon_putchar(int c)
+{
+ return xmon_putc(c, xmon_stdout);
+}
+
+/* Write a NUL-terminated string to f; 0 on success, -1 on short write. */
+int
+xmon_fputs(char *str, void *f)
+{
+ int n = strlen(str);
+
+ return xmon_write(f, str, n) == n? 0: -1;
+}
+
+/* Blocking read of a single character from xmon_stdin; returns the
+ character, or -1 if the underlying read fails. Retries on any other
+ return count. */
+int
+xmon_readchar(void)
+{
+ char ch;
+
+ for (;;) {
+ switch (xmon_read(xmon_stdin, &ch, 1)) {
+ case 1:
+ return ch;
+ case -1:
+ xmon_printf("read(stdin) returned -1\r\n", 0, 0);
+ return -1;
+ }
+ }
+}
+
+static char line[256];
+static char *lineptr;
+static int lineleft;
+
+/* Accumulate input lines (polling, '\r' stripped, truncated to the
+ line buffer) until one contains str, or until the timebase advances
+ past timeout * TB_SPEED ticks (nominally `timeout` seconds at the
+ assumed 25MHz timebase -- see TB_SPEED). The readtb()-t0 subtraction
+ is unsigned, so 32-bit timebase wraparound is handled. Returns 1 on
+ match, 0 on timeout. */
+int xmon_expect(const char *str, unsigned int timeout)
+{
+ int c;
+ unsigned int t0;
+
+ timeout *= TB_SPEED;
+ t0 = readtb();
+ do {
+ lineptr = line;
+ for (;;) {
+ c = xmon_read_poll();
+ if (c == -1) {
+ if (readtb() - t0 > timeout)
+ return 0;
+ continue;
+ }
+ if (c == '\n')
+ break;
+ if (c != '\r' && lineptr < &line[sizeof(line) - 1])
+ *lineptr++ = c;
+ }
+ *lineptr = 0;
+ } while (strstr(line, str) == NULL);
+ return 1;
+}
+
+/* Line-buffered character input with minimal editing: echoes input,
+ supports DEL/backspace (rub out one char) and ^U (kill the whole
+ line), beeps ('\a') when the line buffer is full, and terminates a
+ line on CR/LF (stored as '\n'). Reads a fresh line only when the
+ buffer is drained; returns the next buffered character, or -1 at
+ end of input (read error or ^D with an empty buffer). */
+int
+xmon_getchar(void)
+{
+ int c;
+
+ if (lineleft == 0) {
+ lineptr = line;
+ for (;;) {
+ c = xmon_readchar();
+ if (c == -1 || c == 4)
+ break;
+ if (c == '\r' || c == '\n') {
+ *lineptr++ = '\n';
+ xmon_putchar('\n');
+ break;
+ }
+ switch (c) {
+ case 0177:
+ case '\b':
+ if (lineptr > line) {
+ xmon_putchar('\b');
+ xmon_putchar(' ');
+ xmon_putchar('\b');
+ --lineptr;
+ }
+ break;
+ case 'U' & 0x1F:
+ while (lineptr > line) {
+ xmon_putchar('\b');
+ xmon_putchar(' ');
+ xmon_putchar('\b');
+ --lineptr;
+ }
+ break;
+ default:
+ if (lineptr >= &line[sizeof(line) - 1])
+ xmon_putchar('\a');
+ else {
+ xmon_putchar(c);
+ *lineptr++ = c;
+ }
+ }
+ }
+ lineleft = lineptr - line;
+ lineptr = line;
+ }
+ if (lineleft == 0)
+ return -1;
+ --lineleft;
+ return *lineptr++;
+}
+
+/* fgets() equivalent over xmon_getchar(): read up to nb-1 characters
+ (stopping after a '\n'), NUL-terminate, and return str; returns NULL
+ if end of input is hit before anything was read. */
+char *
+xmon_fgets(char *str, int nb, void *f)
+{
+ char *p;
+ int c;
+
+ for (p = str; p < str + nb - 1; ) {
+ c = xmon_getchar();
+ if (c == -1) {
+ if (p == str)
+ return 0;
+ break;
+ }
+ *p++ = c;
+ if (c == '\n')
+ break;
+ }
+ *p = 0;
+ return str;
+}
--- /dev/null
+/*
+ * Written by Cort Dougan to replace the version originally used
+ * by Paul Mackerras, which came from NetBSD and thus had copyright
+ * conflicts with Linux.
+ *
+ * This file makes liberal use of the standard linux utility
+ * routines to reduce the size of the binary. We assume we can
+ * trust some parts of Linux inside the debugger.
+ * -- Cort (cort@cs.nmt.edu)
+ *
+ * Copyright (C) 1999 Cort Dougan.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <stdarg.h>
+#include "nonstdio.h"
+
+extern int xmon_write(void *, void *, int);
+
+/* Format into a shared static buffer and push the result out through
+ xmon_write(). NOTE(review): vsprintf is unbounded -- output longer
+ than 2048 bytes overruns xmon_buf; and the static buffer makes this
+ non-reentrant (a concern if two cpus printf concurrently) -- confirm
+ whether callers are serialized. */
+void
+xmon_vfprintf(void *f, const char *fmt, va_list ap)
+{
+ static char xmon_buf[2048];
+ int n;
+
+ n = vsprintf(xmon_buf, fmt, ap);
+ xmon_write(f, xmon_buf, n);
+}
+
+/* printf() to xmon's stdout, via xmon_vfprintf(). */
+void
+xmon_printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xmon_vfprintf(stdout, fmt, ap);
+ va_end(ap);
+}
+
+/* fprintf() to an explicit xmon stream, via xmon_vfprintf(). */
+void
+xmon_fprintf(void *f, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xmon_vfprintf(f, fmt, ap);
+ va_end(ap);
+}
+
--- /dev/null
+/*
+ * Routines providing a simple monitor for use on the PowerMac.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/ptrace.h>
+#include <asm/string.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/Naca.h>
+#include <asm/Paca.h>
+#include "nonstdio.h"
+#include "privinst.h"
+#include <linux/mm.h>
+
+#include <asm/ppcdebug.h>
+
+#include <asm/Paca.h>
+
+#define scanhex xmon_scanhex
+#define skipbl xmon_skipbl
+
+#ifdef CONFIG_SMP
+static unsigned long cpus_in_xmon = 0;
+static unsigned long got_xmon = 0;
+static volatile int take_xmon = -1;
+#endif /* CONFIG_SMP */
+
+static unsigned long adrs;
+static int size = 1;
+static unsigned long ndump = 64;
+static unsigned long nidump = 16;
+static unsigned long ncsum = 4096;
+static int termch;
+
+static u_int bus_error_jmp[100];
+#define setjmp xmon_setjmp
+#define longjmp xmon_longjmp
+
+#define memlist_entry list_entry
+#define memlist_next(x) ((x)->next)
+#define memlist_prev(x) ((x)->prev)
+
+
+/* Max number of stack frames we are willing to produce on a backtrace. */
+#define MAXFRAMECOUNT 50
+
+/* Breakpoint stuff */
+struct bpt {
+ unsigned long address; /* effective address of the breakpoint */
+ unsigned instr; /* original instruction, saved while bpinstr is patched in */
+ unsigned long count; /* hits to skip before stopping (see xmon_bpt) */
+ unsigned char enabled; /* non-zero if the slot is in use */
+ char funcname[64]; /* function name for humans */
+};
+
+#define NBPTS 16
+static struct bpt bpts[NBPTS]; /* ordinary instruction breakpoints */
+static struct bpt dabr; /* data address breakpoint (DABR) */
+static struct bpt iabr; /* instruction address breakpoint (IABR) */
+static unsigned bpinstr = 0x7fe00008; /* trap */
+
+/* Prototypes */
+extern void (*debugger_fault_handler)(struct pt_regs *);
+static int cmds(struct pt_regs *);
+static int mread(unsigned long, void *, int);
+static int mwrite(unsigned long, void *, int);
+static void handle_fault(struct pt_regs *);
+static void byterev(unsigned char *, int);
+static void memex(void);
+static int bsesc(void);
+static void dump(void);
+static void prdump(unsigned long, long);
+#ifdef __MWERKS__
+static void prndump(unsigned, int);
+static int nvreadb(unsigned);
+#endif
+static int ppc_inst_dump(unsigned long, long);
+void print_address(unsigned long);
+static int getsp(void);
+static void dump_hash_table(void);
+static void backtrace(struct pt_regs *);
+static void excprint(struct pt_regs *);
+static void prregs(struct pt_regs *);
+static void memops(int);
+static void memlocate(void);
+static void memzcan(void);
+static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned);
+int skipbl(void);
+int scanhex(unsigned long *valp);
+static void scannl(void);
+static int hexdigit(int);
+void getstring(char *, int);
+static void flush_input(void);
+static int inchar(void);
+static void take_input(char *);
+/* static void openforth(void); */
+static unsigned long read_spr(int);
+static void write_spr(int, unsigned long);
+static void super_regs(void);
+static void print_sysmap(void);
+static void remove_bpts(void);
+static void insert_bpts(void);
+static struct bpt *at_breakpoint(unsigned long pc);
+static void bpt_cmds(void);
+static void cacheflush(void);
+#ifdef CONFIG_SMP
+static void cpu_cmd(void);
+#endif /* CONFIG_SMP */
+static void csum(void);
+static void mem_translate(void);
+static void mem_check(void);
+static void mem_find_real(void);
+static void mem_find_vsid(void);
+static void mem_check_full_group(void);
+static void mem_check_pagetable_vsids (void);
+
+static void mem_map_check_slab(void);
+static void mem_map_lock_pages(void);
+static void mem_map_check_hash(void);
+static void mem_check_dup_rpn (void);
+static void show_task(struct task_struct * p);
+static void xmon_show_state(void);
+static void debug_trace(void);
+
+extern int print_insn_big_powerpc(FILE *, unsigned long, unsigned long);
+extern void printf(const char *fmt, ...);
+extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
+extern int xmon_putc(int c, void *f);
+extern int putchar(int ch);
+extern int xmon_read_poll(void);
+extern int setjmp(u_int *);
+extern void longjmp(u_int *, int);
+extern unsigned long _ASR;
+extern struct Naca *naca;
+
+pte_t *find_linux_pte(pgd_t *pgdir, unsigned long va); /* from htab.c */
+
+#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
+
+static char *help_string = "\
+Commands:\n\
+ b show breakpoints\n\
+ bd set data breakpoint\n\
+ bi set instruction breakpoint\n\
+ bc clear breakpoint\n\
+ d dump bytes\n\
+ di dump instructions\n\
+ df dump float values\n\
+ dd dump double values\n\
+ e print exception information\n\
+ f flush cache\n\
+ h dump hash table\n\
+ m examine/change memory\n\
+ mm move a block of memory\n\
+ ms set a block of memory\n\
+ md compare two blocks of memory\n\
+ ml locate a block of memory\n\
+ mz zero a block of memory\n\
+ mx translation information for an effective address\n\
+ mi show information about memory allocation\n\
+ M print System.map\n\
+ p show the task list\n\
+ r print registers\n\
+ s single step\n\
+ S print special registers\n\
+ t print backtrace\n\
+ T Enable/Disable PPCDBG flags\n\
+ x exit monitor\n\
+ z reboot\n\
+";
+
+static int xmon_trace[NR_CPUS];
+#define SSTEP 1 /* stepping because of 's' command */
+#define BRSTEP 2 /* stepping over breakpoint */
+
+/*
+ * Stuff for reading and writing memory safely
+ */
+/* Heavyweight barrier: sync drains pending stores, isync discards
+ prefetched instructions. */
+extern inline void sync(void)
+{
+ asm volatile("sync; isync");
+}
+
+/* Busy-wait: spin CTR down from `loops` (no-op when loops is 0, since
+ mtctr 0 would spin 2^n times). */
+extern inline void __delay(unsigned int loops)
+{
+ if (loops != 0)
+ __asm__ __volatile__("mtctr %0; 1: bdnz 1b" : :
+ "r" (loops) : "ctr");
+}
+
+/* (Ref: 64-bit PowerPC ELF ABI Supplement; Ian Lance Taylor, Zembu Labs).
+ A PPC stack frame looks like this:
+
+ High Address
+ Back Chain
+ FP reg save area
+ GP reg save area
+ Local var space
+ Parameter save area (SP+48)
+ TOC save area (SP+40)
+ link editor doubleword (SP+32)
+ compiler doubleword (SP+24)
+ LR save (SP+16)
+ CR save (SP+8)
+ Back Chain (SP+0)
+
+ Note that the LR (ret addr) may not be saved in the current frame if
+ no functions have been called from the current function.
+ */
+
+/*
+ A traceback table typically follows each function.
+ The find_tb_table() func will fill in this struct. Note that the struct
+ is not an exact match with the encoded table defined by the ABI. It is
+ defined here more for programming convenience.
+ */
+/* Decoded traceback-table information, filled in by find_tb_table()
+ (see the comment above); not a byte-for-byte image of the encoded
+ ABI table. */
+struct tbtable {
+ unsigned long flags; /* flags: */
+#define TBTAB_FLAGSGLOBALLINK (1L<<47)
+#define TBTAB_FLAGSISEPROL (1L<<46)
+#define TBTAB_FLAGSHASTBOFF (1L<<45)
+#define TBTAB_FLAGSINTPROC (1L<<44)
+#define TBTAB_FLAGSHASCTL (1L<<43)
+#define TBTAB_FLAGSTOCLESS (1L<<42)
+#define TBTAB_FLAGSFPPRESENT (1L<<41)
+#define TBTAB_FLAGSNAMEPRESENT (1L<<38)
+#define TBTAB_FLAGSUSESALLOCA (1L<<37)
+#define TBTAB_FLAGSSAVESCR (1L<<33)
+#define TBTAB_FLAGSSAVESLR (1L<<32)
+#define TBTAB_FLAGSSTORESBC (1L<<31)
+#define TBTAB_FLAGSFIXUP (1L<<30)
+#define TBTAB_FLAGSPARMSONSTK (1L<<0)
+ unsigned char fp_saved; /* num fp regs saved f(32-n)..f31 */
+ unsigned char gpr_saved; /* num gpr's saved */
+ unsigned char fixedparms; /* num fixed point parms */
+ unsigned char floatparms; /* num float parms */
+ unsigned char parminfo[32]; /* types of args. null terminated */
+#define TBTAB_PARMFIXED 1
+#define TBTAB_PARMSFLOAT 2
+#define TBTAB_PARMDFLOAT 3
+ unsigned int tb_offset; /* offset from start of func */
+ unsigned long funcstart; /* addr of start of function */
+ char name[64]; /* name of function (null terminated)*/
+};
+static int find_tb_table(unsigned long codeaddr, struct tbtable *tab);
+
+#define SURVEILLANCE_TOKEN 9000
+
+/* NOTE(review): presumably turns off the RTAS surveillance indicator
+ (token 9000, value 0) so time spent stopped in the debugger does not
+ trip the service processor's heartbeat timeout -- confirm against
+ the RTAS set-indicator specification. */
+static inline void disable_surveillance(void)
+{
+ rtas_call(rtas_token("set-indicator"), 3, 1, NULL, SURVEILLANCE_TOKEN,
+ 0, 0);
+}
+
+/*
+ * Main entry to the monitor. If excp is NULL (called directly rather
+ * than from an exception), capture the current GPRs into a local
+ * pt_regs via the std block below, plus MSR/CTR/XER/CR, and use that.
+ * External interrupts (MSR_EE) are disabled for the duration; under
+ * SMP only one cpu at a time runs the command loop. On 's', or when
+ * stopped on a breakpoint, MSR bit 0x400 (single-step) is armed and
+ * breakpoints stay removed; otherwise they are re-inserted on exit.
+ * (Fix: "&regs" in the asm operand and the excp assignment had been
+ * corrupted by a character-encoding error.)
+ */
+void
+xmon(struct pt_regs *excp)
+{
+ struct pt_regs regs;
+ int cmd;
+ unsigned long msr;
+
+ if (excp == NULL) {
+ /* Ok, grab regs as they are now.
+ This won't do a particularly good job because the
+ prologue has already been executed.
+ ToDo: We could reach back into the callers save
+ area to do a better job of representing the
+ caller's state.
+ */
+ asm volatile ("std 0,0(%0)\n\
+ std 1,8(%0)\n\
+ std 2,16(%0)\n\
+ std 3,24(%0)\n\
+ std 4,32(%0)\n\
+ std 5,40(%0)\n\
+ std 6,48(%0)\n\
+ std 7,56(%0)\n\
+ std 8,64(%0)\n\
+ std 9,72(%0)\n\
+ std 10,80(%0)\n\
+ std 11,88(%0)\n\
+ std 12,96(%0)\n\
+ std 13,104(%0)\n\
+ std 14,112(%0)\n\
+ std 15,120(%0)\n\
+ std 16,128(%0)\n\
+ std 17,136(%0)\n\
+ std 18,144(%0)\n\
+ std 19,152(%0)\n\
+ std 20,160(%0)\n\
+ std 21,168(%0)\n\
+ std 22,176(%0)\n\
+ std 23,184(%0)\n\
+ std 24,192(%0)\n\
+ std 25,200(%0)\n\
+ std 26,208(%0)\n\
+ std 27,216(%0)\n\
+ std 28,224(%0)\n\
+ std 29,232(%0)\n\
+ std 30,240(%0)\n\
+ std 31,248(%0)" : : "b" (&regs));
+ printf("xmon called\n");
+ /* Fetch the link reg for this stack frame.
+ NOTE: the prev printf fills in the lr. */
+ regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
+ regs.msr = get_msr();
+ regs.ctr = get_ctr();
+ regs.xer = get_xer();
+ regs.ccr = get_cr();
+ regs.trap = 0;
+ excp = &regs;
+ }
+
+ msr = get_msr();
+ set_msrd(msr & ~MSR_EE); /* disable interrupts */
+ excprint(excp);
+#ifdef CONFIG_SMP
+ /* Park secondary cpus; first-comer (or the cpu handed the monitor
+ via take_xmon) takes the got_xmon lock and runs the command loop. */
+ if (test_and_set_bit(smp_processor_id(), &cpus_in_xmon))
+ for (;;)
+ ;
+ while (test_and_set_bit(0, &got_xmon)) {
+ if (take_xmon == smp_processor_id()) {
+ take_xmon = -1;
+ break;
+ }
+ }
+ /*
+ * XXX: breakpoints are removed while any cpu is in xmon
+ */
+#endif /* CONFIG_SMP */
+ remove_bpts();
+ disable_surveillance();
+ cmd = cmds(excp);
+ if (cmd == 's') {
+ xmon_trace[smp_processor_id()] = SSTEP;
+ excp->msr |= 0x400; /* arm single-step */
+ } else if (at_breakpoint(excp->nip)) {
+ xmon_trace[smp_processor_id()] = BRSTEP;
+ excp->msr |= 0x400; /* step over the breakpoint site */
+ } else {
+ xmon_trace[smp_processor_id()] = 0;
+ insert_bpts();
+ }
+#ifdef CONFIG_SMP
+ clear_bit(0, &got_xmon);
+ clear_bit(smp_processor_id(), &cpus_in_xmon);
+#endif /* CONFIG_SMP */
+ set_msrd(msr); /* restore interrupt enable */
+}
+
+/* Code can call this to print a formatted message plus a backtrace of
+ the current stack, then continue. Captures the GPRs and special
+ registers exactly as xmon() does for the excp == NULL case.
+ (Fix: "&regs" in the asm operand and the backtrace call had been
+ corrupted by a character-encoding error.) */
+void
+xmon_backtrace(const char *fmt, ...)
+{
+ va_list ap;
+ struct pt_regs regs;
+
+
+ /* Ok, grab regs as they are now.
+ This won't do a particularly good job because the
+ prologue has already been executed.
+ ToDo: We could reach back into the callers save
+ area to do a better job of representing the
+ caller's state.
+ */
+ asm volatile ("std 0,0(%0)\n\
+ std 1,8(%0)\n\
+ std 2,16(%0)\n\
+ std 3,24(%0)\n\
+ std 4,32(%0)\n\
+ std 5,40(%0)\n\
+ std 6,48(%0)\n\
+ std 7,56(%0)\n\
+ std 8,64(%0)\n\
+ std 9,72(%0)\n\
+ std 10,80(%0)\n\
+ std 11,88(%0)\n\
+ std 12,96(%0)\n\
+ std 13,104(%0)\n\
+ std 14,112(%0)\n\
+ std 15,120(%0)\n\
+ std 16,128(%0)\n\
+ std 17,136(%0)\n\
+ std 18,144(%0)\n\
+ std 19,152(%0)\n\
+ std 20,160(%0)\n\
+ std 21,168(%0)\n\
+ std 22,176(%0)\n\
+ std 23,184(%0)\n\
+ std 24,192(%0)\n\
+ std 25,200(%0)\n\
+ std 26,208(%0)\n\
+ std 27,216(%0)\n\
+ std 28,224(%0)\n\
+ std 29,232(%0)\n\
+ std 30,240(%0)\n\
+ std 31,248(%0)" : : "b" (&regs));
+ /* Fetch the link reg for this stack frame.
+ NOTE(review): comment inherited from xmon(), where a preceding
+ printf guarantees the LR stack slot is populated; here no call
+ precedes this -- confirm the slot is valid. */
+ regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
+ regs.msr = get_msr();
+ regs.ctr = get_ctr();
+ regs.xer = get_xer();
+ regs.ccr = get_cr();
+ regs.trap = 0;
+
+ va_start(ap, fmt);
+ xmon_vfprintf(stdout, fmt, ap);
+ xmon_putc('\n', stdout);
+ va_end(ap);
+ take_input("\n");
+ backtrace(&regs);
+}
+
+/* Call this to poll for ^C during busy operations.
+ * Returns true if the user has hit ^C.
+ */
+/* Poll for ^C (ETX, 0x03) during long-running operations; returns
+ non-zero if the user hit ^C. */
+int
+xmon_interrupted(void)
+{
+ int ret = xmon_read_poll();
+ if (ret == 3) {
+ printf("\n^C interrupted.\n");
+ return 1;
+ }
+ return 0;
+}
+
+
+/* Interrupt handler hook: enter xmon from a (keyboard) interrupt with
+ local interrupts disabled, restoring the flags afterwards. */
+void
+xmon_irq(int irq, void *d, struct pt_regs *regs)
+{
+ unsigned long flags;
+ __save_flags(flags);
+ __cli();
+ printf("Keyboard interrupt\n");
+ xmon(regs);
+ __restore_flags(flags);
+}
+
+/* Breakpoint-trap handler. Returns 0 if the trap is not one of ours.
+ While bp->count is non-zero the hit is skipped: the count is
+ decremented, breakpoints are pulled, and single-step (MSR 0x400) is
+ armed so the original instruction executes; otherwise the monitor
+ is entered. */
+int
+xmon_bpt(struct pt_regs *regs)
+{
+ struct bpt *bp;
+
+ bp = at_breakpoint(regs->nip);
+ if (!bp)
+ return 0;
+ if (bp->count) {
+ --bp->count;
+ remove_bpts();
+ excprint(regs);
+ xmon_trace[smp_processor_id()] = BRSTEP;
+ regs->msr |= 0x400;
+ } else {
+ printf("Stopped at breakpoint %x (%lx %s)\n", (bp - bpts)+1, bp->address, bp->funcname);
+ xmon(regs);
+ }
+ return 1;
+}
+
+/* Single-step trap handler. Returns 0 if xmon was not stepping on
+ this cpu. After stepping over a breakpoint site (BRSTEP) the
+ breakpoints are re-inserted and execution continues; a user 's'
+ step re-enters the monitor. */
+int
+xmon_sstep(struct pt_regs *regs)
+{
+ if (!xmon_trace[smp_processor_id()])
+ return 0;
+ if (xmon_trace[smp_processor_id()] == BRSTEP) {
+ xmon_trace[smp_processor_id()] = 0;
+ insert_bpts();
+ } else {
+ xmon(regs);
+ }
+ return 1;
+}
+
+/* Data-address breakpoint (DABR) hit. Honors the skip count like
+ xmon_bpt; otherwise records the faulting nip in dabr.instr (used by
+ at_breakpoint to recognize the subsequent stop) and enters xmon. */
+int
+xmon_dabr_match(struct pt_regs *regs)
+{
+ if (dabr.enabled && dabr.count) {
+ --dabr.count;
+ remove_bpts();
+ excprint(regs);
+ xmon_trace[smp_processor_id()] = BRSTEP;
+ regs->msr |= 0x400;
+ } else {
+ dabr.instr = regs->nip;
+ xmon(regs);
+ }
+ return 1;
+}
+
+/* Instruction-address breakpoint (IABR) hit; same skip-count handling
+ as xmon_bpt, entering the monitor once the count is exhausted. */
+int
+xmon_iabr_match(struct pt_regs *regs)
+{
+ if (iabr.enabled && iabr.count) {
+ --iabr.count;
+ remove_bpts();
+ excprint(regs);
+ xmon_trace[smp_processor_id()] = BRSTEP;
+ regs->msr |= 0x400;
+ } else {
+ xmon(regs);
+ }
+ return 1;
+}
+
+/* Return the breakpoint record matching pc, or NULL. The DABR entry
+ matches on the recorded faulting nip (dabr.instr, set in
+ xmon_dabr_match); the IABR and table entries match on address. */
+static struct bpt *
+at_breakpoint(unsigned long pc)
+{
+ int i;
+ struct bpt *bp;
+
+ if (dabr.enabled && pc == dabr.instr)
+ return &dabr;
+ if (iabr.enabled && pc == iabr.address)
+ return &iabr;
+ bp = bpts;
+ for (i = 0; i < NBPTS; ++i, ++bp)
+ if (bp->enabled && pc == bp->address)
+ return bp;
+ return 0;
+}
+
+/* Patch all enabled breakpoints into memory (pSeries only): save the
+ original instruction in bp->instr, write the trap (bpinstr), and
+ flush the icache line; a breakpoint that cannot be written is
+ disabled. DABR/IABR are programmed except on POWER4. */
+static void
+insert_bpts()
+{
+ int i;
+ struct bpt *bp;
+
+ if (_machine != _MACH_pSeries)
+ return;
+ bp = bpts;
+ for (i = 0; i < NBPTS; ++i, ++bp) {
+ if (!bp->enabled)
+ continue;
+ if (mread(bp->address, &bp->instr, 4) != 4
+ || mwrite(bp->address, &bpinstr, 4) != 4) {
+ printf("Couldn't insert breakpoint at %x, disabling\n",
+ bp->address);
+ bp->enabled = 0;
+ } else {
+ store_inst((void *)bp->address);
+ }
+ }
+
+ if (!__is_processor(PV_POWER4)) {
+ if (dabr.enabled)
+ set_dabr(dabr.address);
+ if (iabr.enabled)
+ set_iabr(iabr.address);
+ }
+}
+
+/* Disarm all breakpoints: clear the DABR/IABR hardware registers and
+   restore the saved original instruction at each software breakpoint,
+   but only if the trap instruction is still in place (so we never
+   clobber code that changed underneath us).  pSeries only. */
+static void
+remove_bpts()
+{
+ int i;
+ struct bpt *bp;
+ unsigned instr;
+
+ if (_machine != _MACH_pSeries)
+  return;
+ if (!__is_processor(PV_POWER4)) {
+  set_dabr(0);
+  set_iabr(0);
+ }
+
+ bp = bpts;
+ for (i = 0; i < NBPTS; ++i, ++bp) {
+  if (!bp->enabled)
+   continue;
+  if (mread(bp->address, &instr, 4) == 4
+      && instr == bpinstr
+      && mwrite(bp->address, &bp->instr, 4) != 4)
+   printf("Couldn't remove breakpoint at %x\n",
+          bp->address);
+  else
+   store_inst((void *)bp->address);
+ }
+}
+
+/* Most recently executed command line; replayed when the user presses
+   return at an empty prompt. */
+static char *last_cmd;
+
+/* Command interpreting routine */
+/* Top-level xmon command loop: prompt, read one command character,
+   dispatch.  Returns the command character ('s' step, 'x' exit, or
+   EOF) when the caller should leave the monitor. */
+static int
+cmds(struct pt_regs *excp)
+{
+ int cmd;
+
+ last_cmd = NULL;
+ for(;;) {
+#ifdef CONFIG_SMP
+  printf("%d:", smp_processor_id());
+#endif /* CONFIG_SMP */
+  printf("mon> ");
+  fflush(stdout);
+  flush_input();
+  termch = 0;
+  cmd = skipbl();
+  if( cmd == '\n' ) {
+   /* empty line: replay the previous command, if any */
+   if (last_cmd == NULL)
+    continue;
+   take_input(last_cmd);
+   last_cmd = NULL;
+   cmd = inchar();
+  }
+  switch (cmd) {
+  case 'z':
+   machine_restart(NULL);
+   break;
+  case 'm':
+   /* 'm' introduces a sub-menu of memory commands */
+   cmd = inchar();
+   switch (cmd) {
+   case 'm':
+   case 's':
+   case 'd':
+    memops(cmd);
+    break;
+   case 'l':
+    memlocate();
+    break;
+   case 'z':
+    memzcan();
+    break;
+   case 'x':
+    mem_translate();
+    break;
+   case 'c':
+    mem_check();
+    break;
+   case 'g':
+    mem_check_full_group();
+    break;
+   case 'j':
+    mem_map_check_slab();
+    break;
+   case 'h':
+    mem_map_check_hash();
+    break;
+   case 'f':
+    mem_find_real();
+    break;
+   case 'e':
+    mem_find_vsid();
+    break;
+   case 'r':
+    mem_check_dup_rpn();
+    break;
+   case 'i':
+    show_mem();
+    break;
+   case 'o':
+    mem_check_pagetable_vsids ();
+    break;
+   case 'q':
+    mem_map_lock_pages() ;
+    break;
+   default:
+    /* anything else: push back and drop into memory examine */
+    termch = cmd;
+    memex();
+   }
+   break;
+  case 'd':
+   dump();
+   break;
+  case 'r':
+   if (excp != NULL)
+    prregs(excp); /* print regs */
+   break;
+  case 'e':
+   if (excp == NULL)
+    printf("No exception information\n");
+   else
+    excprint(excp);
+   break;
+  case 'M':
+   print_sysmap();
+   break;
+  case 'S':
+   super_regs();
+   break;
+  case 't':
+   backtrace(excp);
+   break;
+  case 'f':
+   cacheflush();
+   break;
+  case 'h':
+   dump_hash_table();
+   break;
+  case 's':
+  case 'x':
+  case EOF:
+   /* leave the monitor; caller decides what to do next */
+   return cmd;
+  case '?':
+   printf(help_string);
+   break;
+  case 'p':
+   xmon_show_state();
+   break;
+  case 'b':
+   bpt_cmds();
+   break;
+  case 'C':
+   csum();
+   break;
+#ifdef CONFIG_SMP
+  case 'c':
+   cpu_cmd();
+   break;
+#endif /* CONFIG_SMP */
+  case 'T':
+   debug_trace();
+   break;
+  default:
+   /* echo the unknown command (printable chars as-is) */
+   printf("Unrecognized command: ");
+   do {
+    if( ' ' < cmd && cmd <= '~' )
+     putchar(cmd);
+    else
+     printf("\\x%x", cmd);
+    cmd = inchar();
+   } while (cmd != '\n');
+   printf(" (type ? for help)\n");
+   break;
+  }
+ }
+}
+
+#ifdef CONFIG_SMP
+/* 'c' command: SMP cpu control.
+   "ci"      - interrupt (stop) all other cpus into xmon;
+   "c"       - list which cpus are currently stopped in xmon
+               (current cpu marked with '*');
+   "c <n>"   - hand xmon control over to cpu n and wait to get it back.
+   Fix: the '*' marker was printed with printf("*", cpu), passing an
+   argument that the format string never consumes. */
+static void cpu_cmd(void)
+{
+ unsigned long cpu;
+ int timeout;
+ int cmd;
+
+ cmd = inchar();
+ if (cmd == 'i') {
+  printf("stopping all cpus\n");
+  /* interrupt other cpu(s) */
+  cpu = MSG_ALL_BUT_SELF;
+  smp_send_xmon_break(cpu);
+  return;
+ }
+ termch = cmd;
+ if (!scanhex(&cpu)) {
+  /* print cpus waiting or in xmon */
+  printf("cpus stopped:");
+  for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+   if (test_bit(cpu, &cpus_in_xmon)) {
+    printf(" %d", cpu);
+    if (cpu == smp_processor_id())
+     printf("*");
+   }
+  }
+  printf("\n");
+  return;
+ }
+ /* try to switch to cpu specified */
+ take_xmon = cpu;
+ timeout = 10000000;
+ while (take_xmon >= 0) {
+  if (--timeout == 0) {
+   /* yes there's a race here */
+   take_xmon = -1;
+   printf("cpu %u didn't take control\n", cpu);
+   return;
+  }
+ }
+ /* now have to wait to be given control back */
+ while (test_and_set_bit(0, &got_xmon)) {
+  if (take_xmon == smp_processor_id()) {
+   take_xmon = -1;
+   break;
+  }
+ }
+}
+#endif /* CONFIG_SMP */
+
+/* Byte-indexed FCS (frame check sequence) CRC-16 lookup table used by
+   csum().  The values appear to be the reflected CCITT CRC-16 table
+   (as used by HDLC/PPP FCS-16) -- see the FCS() macro below. */
+static unsigned short fcstab[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+ 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+ 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+ 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+ 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+ 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+ 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+ 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+ 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+ 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+ 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+ 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+ 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+ 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+ 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+ 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+ 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+ 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+ 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+ 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+ 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+ 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+ 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+ 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+ 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+ 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+ 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+ 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+ 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+ 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+ 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+ 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+};
+
+/* One CRC step: fold byte c into the running FCS via the table. */
+#define FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff])
+
+/* 'C' command: compute the FCS-16 checksum of ncsum bytes starting at
+   adrs (both read from the command line) and print it.  Stops early
+   if a byte cannot be read. */
+static void
+csum(void)
+{
+ unsigned int i;
+ unsigned short fcs;
+ unsigned char v;
+
+ if (!scanhex(&adrs))
+  return;
+ if (!scanhex(&ncsum))
+  return;
+ fcs = 0xffff;
+ for (i = 0; i < ncsum; ++i) {
+  if (mread(adrs+i, &v, 1) == 0) {
+   printf("csum stopped at %x\n", adrs+i);
+   break;
+  }
+  fcs = FCS(fcs, v);
+ }
+ printf("%x\n", fcs);
+}
+
+/* Help text printed by bpt_cmds() for "b?" / "bc?". */
+static char *breakpoint_help_string =
+    "Breakpoint command usage:\n"
+    "b                show breakpoints\n"
+    "b <addr> [cnt]   set breakpoint at given instr addr\n"
+    "bc               clear all breakpoints\n"
+    "bc <n/addr>      clear breakpoint number n or at addr\n"
+    "bi <addr> [cnt]  set hardware instr breakpoint (broken?)\n"
+    "bd <addr> [cnt]  set hardware data breakpoint (broken?)\n"
+    "";
+
+/* 'b' command: breakpoint management.
+   bd  - set hardware data breakpoint (not on POWER4)
+   bi  - set hardware instruction breakpoint (not on POWER4)
+   bc  - clear one breakpoint (by number or address) or all
+   b   - list breakpoints; "b <addr> [cnt]" sets a trap breakpoint. */
+static void
+bpt_cmds(void)
+{
+ int cmd;
+ unsigned long a;
+ int mode, i;
+ struct bpt *bp;
+ struct tbtable tab;
+
+ cmd = inchar();
+ switch (cmd) {
+ case 'd':	/* bd - hardware data breakpoint */
+  if (__is_processor(PV_POWER4)) {
+   printf("Not implemented on POWER4\n");
+   break;
+  }
+  /* mode bits live in the low 3 bits of the DABR address:
+     7 = read+write, 5 = read, 6 = write -- verify against DABR spec */
+  mode = 7;
+  cmd = inchar();
+  if (cmd == 'r')
+   mode = 5;
+  else if (cmd == 'w')
+   mode = 6;
+  else
+   termch = cmd;
+  dabr.address = 0;
+  dabr.count = 0;
+  dabr.enabled = scanhex(&dabr.address);
+  scanhex(&dabr.count);
+  if (dabr.enabled)
+   dabr.address = (dabr.address & ~7) | mode;
+  break;
+ case 'i':	/* bi - hardware instr breakpoint */
+  if (__is_processor(PV_POWER4)) {
+   printf("Not implemented on POWER4\n");
+   break;
+  }
+  iabr.address = 0;
+  iabr.count = 0;
+  iabr.enabled = scanhex(&iabr.address);
+  if (iabr.enabled)
+   iabr.address |= 3;
+  scanhex(&iabr.count);
+  break;
+ case 'c':
+  if (!scanhex(&a)) {
+   /* clear all breakpoints */
+   for (i = 0; i < NBPTS; ++i)
+    bpts[i].enabled = 0;
+   iabr.enabled = 0;
+   dabr.enabled = 0;
+   printf("All breakpoints cleared\n");
+  } else {
+   if (a <= NBPTS && a >= 1) {
+    /* assume a breakpoint number */
+    --a;	/* bp nums are 1 based */
+    bp = &bpts[a];
+   } else {
+    /* assume a breakpoint address */
+    bp = at_breakpoint(a);
+   }
+   if (bp == 0) {
+    printf("No breakpoint at %x\n", a);
+   } else {
+    printf("Cleared breakpoint %x (%lx %s)\n", (bp - bpts)+1, bp->address, bp->funcname);
+    bp->enabled = 0;
+   }
+  }
+  break;
+ case '?':
+  printf(breakpoint_help_string);
+  break;
+ default:
+  termch = cmd;
+  cmd = skipbl();
+  if (cmd == '?') {
+   printf(breakpoint_help_string);
+   break;
+  }
+  termch = cmd;
+  if (!scanhex(&a)) {
+   /* no address given: print all breakpoints */
+   int bpnum;
+
+   printf("   type            address    count\n");
+   if (dabr.enabled) {
+    printf("   data %.16lx %8x [", dabr.address & ~7,
+           dabr.count);
+    if (dabr.address & 1)
+     printf("r");
+    if (dabr.address & 2)
+     printf("w");
+    printf("]\n");
+   }
+   if (iabr.enabled)
+    printf("   inst %.16lx %8x\n", iabr.address & ~3,
+           iabr.count);
+   for (bp = bpts, bpnum = 1; bp < &bpts[NBPTS]; ++bp, ++bpnum)
+    if (bp->enabled)
+     printf("%2x trap %.16lx %8x  %s\n", bpnum, bp->address, bp->count, bp->funcname);
+   break;
+  }
+  /* address given: reuse an existing slot or allocate a free one */
+  bp = at_breakpoint(a);
+  if (bp == 0) {
+   for (bp = bpts; bp < &bpts[NBPTS]; ++bp)
+    if (!bp->enabled)
+     break;
+   if (bp >= &bpts[NBPTS]) {
+    printf("Sorry, no free breakpoints.  Please clear one first.\n");
+    break;
+   }
+  }
+  bp->enabled = 1;
+  bp->address = a;
+  bp->count = 0;
+  scanhex(&bp->count);
+  /* Find the function name just once. */
+  bp->funcname[0] = '\0';
+  if (find_tb_table(bp->address, &tab) && tab.name[0]) {
+   /* Got a nice name for it. */
+   int delta = bp->address - tab.funcstart;
+   sprintf(bp->funcname, "%s+0x%x", tab.name, delta);
+  }
+  printf("Set breakpoint %2x trap %.16lx %8x  %s\n", (bp-bpts)+1, bp->address, bp->count, bp->funcname);
+  break;
+ }
+}
+
+/* Very cheap human name for vector lookup. */
+/* Return a short parenthesized name for a known exception vector
+   offset; unknown vectors yield an empty string. */
+static
+const char *getvecname(unsigned long vec)
+{
+ if (vec == 0x100) return "(System Reset)";
+ if (vec == 0x200) return "(Machine Check)";
+ if (vec == 0x300) return "(Data Access)";
+ if (vec == 0x400) return "(Instruction Access)";
+ if (vec == 0x500) return "(Hardware Interrupt)";
+ if (vec == 0x600) return "(Alignment)";
+ if (vec == 0x700) return "(Program Check)";
+ if (vec == 0x800) return "(FPU Unavailable)";
+ if (vec == 0x900) return "(Decrementer)";
+ if (vec == 0xc00) return "(System Call)";
+ if (vec == 0xd00) return "(Single Step)";
+ if (vec == 0xf00) return "(Performance Monitor)";
+ return "";
+}
+
+/* 't' command: walk the stack backchain starting from the exception
+   frame (or the current stack pointer if there is none), printing one
+   line per frame.  Recognizes the exception-return trampolines and
+   dumps the interrupted pt_regs when it meets one.
+   Fix: the mread() of the saved pt_regs used "&regs" mangled into a
+   stray "(R)" character (HTML-entity corruption of "&reg"); restored
+   to &regs so the code compiles again. */
+static void
+backtrace(struct pt_regs *excp)
+{
+ unsigned long sp;
+ unsigned long lr;
+ unsigned long stack[3];
+ struct pt_regs regs;
+ struct tbtable tab;
+ int framecount;
+ char *funcname;
+ /* declare these as raw ptrs so we don't get func descriptors */
+ extern void *ret_from_except, *ret_from_syscall_1;
+
+ if (excp != NULL) {
+  lr = excp->link;
+  sp = excp->gpr[1];
+ } else {
+  /* Use care not to call any function before this point
+     so the saved lr has a chance of being good. */
+  asm volatile ("mflr %0" : "=r" (lr) :);
+  sp = getsp();
+ }
+ /* an explicit start address on the command line overrides sp */
+ scanhex(&sp);
+ scannl();
+ for (framecount = 0;
+      sp != 0 && framecount < MAXFRAMECOUNT;
+      sp = stack[0], framecount++) {
+  /* stack[0] = backchain, stack[2] = saved LR of this frame */
+  if (mread(sp, stack, sizeof(stack)) != sizeof(stack))
+   break;
+#if 0
+  if (lr != 0) {
+   stack[2] = lr;	/* fake out the first saved lr.  It may not be saved yet. */
+   lr = 0;
+  }
+#endif
+  printf("%.16lx  %.16lx", sp, stack[2]);
+  /* TAI -- for now only the ones cast to unsigned long will match.
+   * Need to test the rest...
+   */
+  if ((stack[2] == (unsigned long)ret_from_except &&
+       (funcname = "ret_from_except"))
+      || (stack[2] == (unsigned long)ret_from_syscall_1 &&
+          (funcname = "ret_from_syscall_1"))
+#if 0
+      || stack[2] == (unsigned) &ret_from_syscall_2
+      || stack[2] == (unsigned) &do_signal_ret
+#endif
+      ) {
+   /* exception trampoline: a pt_regs follows the frame
+      header (at offset 112 -- presumably the frame
+      overhead; verify against STACK_FRAME_OVERHEAD) */
+   printf(" %s\n", funcname);
+   if (mread(sp+112, &regs, sizeof(regs)) != sizeof(regs))
+    break;
+   printf("exception: %lx %s regs %lx\n", regs.trap, getvecname(regs.trap), sp+112);
+   printf("                  %.16lx", regs.nip);
+   if ((regs.nip & 0xffffffff00000000UL) &&
+       find_tb_table(regs.nip, &tab)) {
+    int delta = regs.nip-tab.funcstart;
+    if (delta < 0)
+     printf(" <unknown code>");
+    else
+     printf(" %s+0x%x", tab.name, delta);
+   }
+   printf("\n");
+   if (regs.gpr[1] < sp) {
+    printf("<Stack drops into 32-bit userspace %.16lx>\n", regs.gpr[1]);
+    break;
+   }
+
+   sp = regs.gpr[1];
+   if (mread(sp, stack, sizeof(stack)) != sizeof(stack))
+    break;
+  } else {
+   if (stack[2] && find_tb_table(stack[2], &tab)) {
+    int delta = stack[2]-tab.funcstart;
+    if (delta < 0)
+     printf(" <unknown code>");
+    else
+     printf(" %s+0x%x", tab.name, delta);
+   }
+   printf("\n");
+  }
+  /* sanity-check the backchain: it must grow upward */
+  if (stack[0] && stack[0] <= sp) {
+   if ((stack[0] & 0xffffffff00000000UL) == 0)
+    printf("<Stack drops into 32-bit userspace %.16lx>\n", stack[0]);
+   else
+    printf("<Corrupt stack.  Next backchain is %.16lx>\n", stack[0]);
+   break;
+  }
+ }
+ if (framecount >= MAXFRAMECOUNT)
+  printf("<Punt.  Too many stack frames>\n");
+}
+
+/* Return the current stack pointer (register r1).
+   NOTE(review): the return type is int, so on ppc64 the 64-bit stack
+   pointer is truncated before callers widen it back to unsigned long
+   (see backtrace()).  Looks like a 32-bit leftover -- verify before
+   relying on the value above 4GB. */
+int
+getsp()
+{
+    int x;
+
+    asm("mr %0,1" : "=r" (x) :);
+    return x;
+}
+
+/* Serializes exception printout when several cpus enter xmon. */
+spinlock_t exception_print_lock = SPIN_LOCK_UNLOCKED;
+
+/* Print a summary of the exception frame fp: vector, pc/lr (with
+   symbolic names when a traceback table is found), sp, msr, the fault
+   registers for data-access/alignment traps, and the current task. */
+void
+excprint(struct pt_regs *fp)
+{
+ struct task_struct *c;
+ struct tbtable tab;
+ unsigned long flags;
+
+ spin_lock_irqsave(&exception_print_lock, flags);
+
+#ifdef CONFIG_SMP
+ printf("cpu %d: ", smp_processor_id());
+#endif /* CONFIG_SMP */
+
+ printf("Vector: %lx %s at  [%lx]\n", fp->trap, getvecname(fp->trap), fp);
+ printf("    pc: %lx", fp->nip);
+ if (find_tb_table(fp->nip, &tab) && tab.name[0]) {
+  /* Got a nice name for it */
+  int delta = fp->nip - tab.funcstart;
+  printf(" (%s+0x%x)", tab.name, delta);
+ }
+ printf("\n");
+ printf("    lr: %lx", fp->link);
+ if (find_tb_table(fp->link, &tab) && tab.name[0]) {
+  /* Got a nice name for it */
+  int delta = fp->link - tab.funcstart;
+  printf(" (%s+0x%x)", tab.name, delta);
+ }
+ printf("\n");
+ printf("    sp: %lx\n", fp->gpr[1]);
+ printf("   msr: %lx\n", fp->msr);
+
+ /* dar/dsisr are only meaningful for data access (0x300) and
+    alignment (0x600) exceptions */
+ if (fp->trap == 0x300 || fp->trap == 0x600) {
+  printf("   dar: %lx\n", fp->dar);
+  printf(" dsisr: %lx\n", fp->dsisr);
+ }
+
+ /* XXX: need to copy current or we die.  Why? */
+ c = current;
+ printf("  current = 0x%lx\n", c);
+ printf("  paca    = 0x%lx\n", get_paca());
+ if (c) {
+  printf("  current = %lx, pid = %ld, comm = %s\n",
+         c, c->pid, c->comm);
+ }
+
+ spin_unlock_irqrestore(&exception_print_lock, flags);
+}
+
+/* 'r' command: print the GPRs and key special registers from fp.  An
+   optional hex address on the command line overrides fp with an
+   arbitrary in-memory pt_regs. */
+void
+prregs(struct pt_regs *fp)
+{
+ int n;
+ unsigned long base;
+
+ if (scanhex((void *)&base))
+  fp = (struct pt_regs *) base;
+ /* two registers per line: Rn and Rn+16 */
+ for (n = 0; n < 16; ++n)
+  printf("R%.2ld = %.16lx   R%.2ld = %.16lx\n", n, fp->gpr[n],
+         n+16, fp->gpr[n+16]);
+ printf("pc  = %.16lx   msr = %.16lx\nlr  = %.16lx   cr  = %.16lx\n",
+        fp->nip, fp->msr, fp->link, fp->ccr);
+ printf("ctr = %.16lx   xer = %.16lx   trap = %8lx\n",
+        fp->ctr, fp->xer, fp->trap);
+}
+
+/* 'f' command: flush ("f <addr> [len]") or invalidate ("fi <addr>
+   [len]") the data cache over the given range, one 32-byte cache
+   line at a time. */
+void
+cacheflush(void)
+{
+ int cmd;
+ unsigned long nflush;
+
+ cmd = inchar();
+ if (cmd != 'i')
+  termch = cmd;
+ scanhex((void *)&adrs);
+ if (termch != '\n')
+  termch = 0;
+ nflush = 1;
+ scanhex(&nflush);
+ /* convert a byte count into a count of 32-byte lines */
+ nflush = (nflush + 31) / 32;
+ if (cmd != 'i') {
+  for (; nflush > 0; --nflush, adrs += 0x20)
+   cflush((void *) adrs);
+ } else {
+  for (; nflush > 0; --nflush, adrs += 0x20)
+   cinval((void *) adrs);
+ }
+}
+
+/* Read special-purpose register n by synthesizing and calling a tiny
+   routine at runtime: an mfspr r3,n (opcode base 0x7c6002a6 with the
+   SPR number split across the two 5-bit fields) followed by blr
+   (0x4e800020).  opd[] serves as a ppc64 ELF function descriptor
+   {entry, TOC, env} so the instruction pair can be called through a
+   normal function pointer. */
+unsigned long
+read_spr(int n)
+{
+ unsigned int instrs[2];
+ unsigned long (*code)(void);
+ unsigned long opd[3];
+
+ instrs[0] = 0x7c6002a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6);
+ instrs[1] = 0x4e800020;
+ opd[0] = instrs;
+ opd[1] = 0;
+ opd[2] = 0;
+ /* flush icache so the cpu executes the freshly written code */
+ store_inst(instrs);
+ store_inst(instrs+1);
+ code = (unsigned long (*)(void)) opd;
+
+ return code();
+}
+
+/* Write val to special-purpose register n.  Same runtime
+   code-synthesis trick as read_spr(), but with mtspr (opcode base
+   0x7c6003a6); val arrives in r3 per the ABI. */
+void
+write_spr(int n, unsigned long val)
+{
+ unsigned int instrs[2];
+ unsigned long (*code)(unsigned long);
+ unsigned long opd[3];
+
+ instrs[0] = 0x7c6003a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6);
+ instrs[1] = 0x4e800020;
+ opd[0] = instrs;
+ opd[1] = 0;
+ opd[2] = 0;
+ store_inst(instrs);
+ store_inst(instrs+1);
+ code = (unsigned long (*)(unsigned long)) opd;
+
+ code(val);
+}
+
+/* Scratch register number for super_regs()'s read/write subcommands. */
+static unsigned long regno;
+extern char exc_prolog;
+extern char dec_exc;
+
+/* 'M' command: dump the in-kernel copy of System.map, if present. */
+void
+print_sysmap(void)
+{
+ extern char *sysmap;
+ if ( sysmap )
+  printf("System.map: \n%s", sysmap);
+}
+
+/* 'S' command: with no argument, dump the supervisor registers plus
+   the relevant Paca/LpPaca/LpRegSave areas; otherwise
+   "Sr <spr>"  read an SPR, "Sw <spr> <val>" write then read back,
+   "Ss <n> <val>" set a segment register, "Sm <val>" set the MSR.
+   Fixes: the "Saved Sprg1" line printed xSPRG0 twice (now xSPRG1,
+   matching its label), and scanhex's argument had "&regno" mangled
+   into a stray "(R)" character (HTML-entity corruption). */
+void
+super_regs()
+{
+ int i, cmd;
+ unsigned long val;
+ struct Paca* ptrPaca = NULL;
+ struct ItLpPaca* ptrLpPaca = NULL;
+ struct ItLpRegSave* ptrLpRegSave = NULL;
+
+ cmd = skipbl();
+ if (cmd == '\n') {
+  unsigned long sp, toc;
+  asm("mr %0,1" : "=r" (sp) :);
+  asm("mr %0,2" : "=r" (toc) :);
+
+  printf("msr  = %.16lx  sprg0= %.16lx\n", get_msr(), get_sprg0());
+  printf("pvr  = %.16lx  sprg1= %.16lx\n", get_pvr(), get_sprg1());
+  printf("dec  = %.16lx  sprg2= %.16lx\n", get_dec(), get_sprg2());
+  printf("sp   = %.16lx  sprg3= %.16lx\n", sp, get_sprg3());
+  printf("toc  = %.16lx  dar  = %.16lx\n", toc, get_dar());
+  printf("srr0 = %.16lx  srr1 = %.16lx\n", get_srr0(), get_srr1());
+  printf("asr  = %.16lx\n", mfasr());
+  for (i = 0; i < 8; ++i)
+   printf("sr%.2ld = %.16lx  sr%.2ld = %.16lx\n", i, get_sr(i), i+8, get_sr(i+8));
+
+  // Dump out relevant Paca data areas.
+  printf("Paca: \n");
+  ptrPaca = (struct Paca*)get_sprg3();
+
+  printf("  Local Processor Control Area (LpPaca): \n");
+  ptrLpPaca = ptrPaca->xLpPacaPtr;
+  printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n", ptrLpPaca->xSavedSrr0, ptrLpPaca->xSavedSrr1);
+  printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n", ptrLpPaca->xSavedGpr3, ptrLpPaca->xSavedGpr4);
+  printf("    Saved Gpr5=%.16lx \n", ptrLpPaca->xSavedGpr5);
+
+  printf("  Local Processor Register Save Area (LpRegSave): \n");
+  ptrLpRegSave = ptrPaca->xLpRegSavePtr;
+  /* bug fix: second field was xSPRG0 despite the Sprg1 label */
+  printf("    Saved Sprg0=%.16lx  Saved Sprg1=%.16lx \n", ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG1);
+  printf("    Saved Sprg2=%.16lx  Saved Sprg3=%.16lx \n", ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
+  printf("    Saved Msr  =%.16lx  Saved Nia  =%.16lx \n", ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+
+  return;
+ }
+
+ scanhex(&regno);
+ switch (cmd) {
+ case 'w':
+  val = read_spr(regno);
+  scanhex(&val);
+  write_spr(regno, val);
+  /* fall through */
+ case 'r':
+  printf("spr %lx = %lx\n", regno, read_spr(regno));
+  break;
+ case 's':
+  val = get_sr(regno);
+  scanhex(&val);
+  set_sr(regno, val);
+  break;
+ case 'm':
+  val = get_msr();
+  scanhex(&val);
+  set_msrd(val);
+  break;
+ }
+ scannl();
+}
+
+#if 0
+/* Disabled (dead) code kept for reference: feed a command line to the
+   Open Firmware "interpret" client service via prom_entry.  args[4]
+   receives the interpreter's error status. */
+static void
+openforth()
+{
+ int c;
+ char *p;
+ char cmd[1024];
+ int args[5];
+ extern int (*prom_entry)(int *);
+
+ p = cmd;
+ c = skipbl();
+ while (c != '\n') {
+  *p++ = c;
+  c = inchar();
+ }
+ *p = 0;
+ args[0] = (int) "interpret";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (int) cmd;
+ (*prom_entry)(args);
+ printf("\n");
+ if (args[4] != 0)
+  printf("error %x\n", args[4]);
+}
+#endif
+
+#ifndef CONFIG_PPC64BRIDGE
+/* Walk the classic (32-bit PTE format) hash page table for segment
+   seg over virtual addresses [start, end), printing contiguous
+   translated ranges as "va to pa ... va".  For each page it probes
+   the primary hash group, then the secondary (hash complemented,
+   H bit 0x40 set in the compare word).
+   NOTE(review): group/PTE sizes (16 words per group, 2 words per PTE)
+   and the compare-word layout are taken from the constants below --
+   verify against the 32-bit PowerPC HTAB format. */
+static void
+dump_hash_table_seg(unsigned seg, unsigned start, unsigned end)
+{
+ extern void *Hash;
+ extern unsigned long Hash_size;
+ unsigned *htab = Hash;
+ unsigned hsize = Hash_size;
+ unsigned v, hmask, va, last_va;
+ int found, last_found, i;
+ unsigned *hg, w1, last_w2, last_va0;
+
+ last_found = 0;
+ hmask = hsize / 64 - 1;
+ va = start;
+ start = (start >> 12) & 0xffff;
+ end = (end >> 12) & 0xffff;
+ for (v = start; v < end; ++v) {
+  found = 0;
+  /* primary hash group */
+  hg = htab + (((v ^ seg) & hmask) * 16);
+  w1 = 0x80000000 | (seg << 7) | (v >> 10);
+  for (i = 0; i < 8; ++i, hg += 2) {
+   if (*hg == w1) {
+    found = 1;
+    break;
+   }
+  }
+  if (!found) {
+   /* secondary hash group */
+   w1 ^= 0x40;
+   hg = htab + ((~(v ^ seg) & hmask) * 16);
+   for (i = 0; i < 8; ++i, hg += 2) {
+    if (*hg == w1) {
+     found = 1;
+     break;
+    }
+   }
+  }
+  /* close/open a printed range when contiguity breaks */
+  if (!(last_found && found && (hg[1] & ~0x180) == last_w2 + 4096)) {
+   if (last_found) {
+    if (last_va != last_va0)
+     printf(" ... %x", last_va);
+    printf("\n");
+   }
+   if (found) {
+    printf("%x to %x", va, hg[1]);
+    last_va0 = va;
+   }
+   last_found = found;
+  }
+  if (found) {
+   last_w2 = hg[1] & ~0x180;
+   last_va = va;
+  }
+  va += 4096;
+ }
+ if (last_found)
+  printf(" ... %x\n", last_va);
+}
+#else /* CONFIG_PPC64BRIDGE */
+/* Same walk as above but for the 64-bit-bridge hash table layout:
+   4-word PTEs (32 words per group), the compare word in hg[1] and the
+   RPN word in hg[3], secondary hash flagged with bit 2.
+   NOTE(review): field offsets inferred from the constants below --
+   verify against the PPC64BRIDGE HTAB format. */
+static void
+dump_hash_table_seg(unsigned seg, unsigned start, unsigned end)
+{
+ extern void *Hash;
+ extern unsigned long Hash_size;
+ unsigned *htab = Hash;
+ unsigned hsize = Hash_size;
+ unsigned v, hmask, va, last_va;
+ int found, last_found, i;
+ unsigned *hg, w1, last_w2, last_va0;
+
+ last_found = 0;
+ hmask = hsize / 128 - 1;
+ va = start;
+ start = (start >> 12) & 0xffff;
+ end = (end >> 12) & 0xffff;
+ for (v = start; v < end; ++v) {
+  found = 0;
+  /* primary hash group */
+  hg = htab + (((v ^ seg) & hmask) * 32);
+  w1 = 1 | (seg << 12) | ((v & 0xf800) >> 4);
+  for (i = 0; i < 8; ++i, hg += 4) {
+   if (hg[1] == w1) {
+    found = 1;
+    break;
+   }
+  }
+  if (!found) {
+   /* secondary hash group */
+   w1 ^= 2;
+   hg = htab + ((~(v ^ seg) & hmask) * 32);
+   for (i = 0; i < 8; ++i, hg += 4) {
+    if (hg[1] == w1) {
+     found = 1;
+     break;
+    }
+   }
+  }
+  /* close/open a printed range when contiguity breaks */
+  if (!(last_found && found && (hg[3] & ~0x180) == last_w2 + 4096)) {
+   if (last_found) {
+    if (last_va != last_va0)
+     printf(" ... %x", last_va);
+    printf("\n");
+   }
+   if (found) {
+    printf("%x to %x", va, hg[3]);
+    last_va0 = va;
+   }
+   last_found = found;
+  }
+  if (found) {
+   last_w2 = hg[3] & ~0x180;
+   last_va = va;
+  }
+  va += 4096;
+ }
+ if (last_found)
+  printf(" ... %x\n", last_va);
+}
+#endif /* CONFIG_PPC64BRIDGE */
+
+/* Sticky arguments for the 'h' command (context, va range). */
+static unsigned long hash_ctx;
+static unsigned long hash_start;
+static unsigned long hash_end;
+
+/* 'h' command: dump hash-table mappings for a context over an
+   optional virtual address range (default 0..0xfffff000), one 256MB
+   segment at a time via dump_hash_table_seg(). */
+static void
+dump_hash_table()
+{
+ int seg;
+ unsigned seg_start, seg_end;
+
+ hash_ctx = 0;
+ hash_start = 0;
+ hash_end = 0xfffff000;
+ scanhex(&hash_ctx);
+ scanhex(&hash_start);
+ scanhex(&hash_end);
+ printf("Mappings for context %x\n", hash_ctx);
+ seg_start = hash_start;
+ for (seg = hash_start >> 28; seg <= hash_end >> 28; ++seg) {
+  seg_end = (seg << 28) | 0x0ffff000;
+  if (seg_end > hash_end)
+   seg_end = hash_end;
+  dump_hash_table_seg((hash_ctx << 4) + seg, seg_start, seg_end);
+  seg_start = seg_end + 0x1000;
+ }
+}
+
+/* Fault-safe memory read: copy size bytes from address adrs into buf.
+   Machine checks / access faults during the copy longjmp back here
+   via handle_fault(), so a bad address returns the number of bytes
+   copied so far (possibly 0) instead of crashing.  Returns size on
+   full success.  2- and 4-byte reads are done as a single access. */
+int
+mread(unsigned long adrs, void *buf, int size)
+{
+ volatile int n;
+ char *p, *q;
+
+ n = 0;
+ if( setjmp(bus_error_jmp) == 0 ){
+  debugger_fault_handler = handle_fault;
+  sync();
+  p = (char *) adrs;
+  q = (char *) buf;
+  switch (size) {
+  case 2: *(short *)q = *(short *)p;	break;
+  case 4: *(int *)q = *(int *)p;	break;
+  default:
+   for( ; n < size; ++n ) {
+    *q++ = *p++;
+    sync();
+   }
+  }
+  sync();
+  /* wait a little while to see if we get a machine check */
+  __delay(200);
+  n = size;
+ }
+ debugger_fault_handler = 0;
+ return n;
+}
+
+/* Fault-safe memory write: mirror of mread().  Copies size bytes from
+   buf to address adrs, trapping faults via setjmp/handle_fault;
+   returns the number of bytes written (size on success) and prints a
+   diagnostic on failure. */
+int
+mwrite(unsigned long adrs, void *buf, int size)
+{
+ volatile int n;
+ char *p, *q;
+
+ n = 0;
+ if( setjmp(bus_error_jmp) == 0 ){
+  debugger_fault_handler = handle_fault;
+  sync();
+  p = (char *) adrs;
+  q = (char *) buf;
+  switch (size) {
+  case 2: *(short *)p = *(short *)q;	break;
+  case 4: *(int *)p = *(int *)q;	break;
+  default:
+   for( ; n < size; ++n ) {
+    *p++ = *q++;
+    sync();
+   }
+  }
+  sync();
+  /* wait a little while to see if we get a machine check */
+  __delay(200);
+  n = size;
+ } else {
+  printf("*** Error writing address %x\n", adrs + n);
+ }
+ debugger_fault_handler = 0;
+ return n;
+}
+
+/* Category of the last fault trapped by mread/mwrite (indexes
+   fault_chars): 0 machine check, 1 data access, 2 other. */
+static int fault_type;
+/* Placeholder strings printed in place of unreadable bytes. */
+static char *fault_chars[] = { "--", "**", "##" };
+
+/* Installed as debugger_fault_handler while mread/mwrite run: record
+   the fault category and longjmp back to the setjmp in the caller. */
+static void
+handle_fault(struct pt_regs *regs)
+{
+ fault_type = regs->trap == 0x200? 0: regs->trap == 0x300? 1: 2;
+ longjmp(bus_error_jmp, 1);
+}
+
+/* Exchange a and b using temporary t. */
+#define SWAP(a, b, t)	((t) = (a), (a) = (b), (b) = (t))
+
+/* Reverse the byte order of a 2-, 4- or 8-byte value in place; any
+   other size is left untouched. */
+void
+byterev(unsigned char *val, int size)
+{
+ int t;
+
+ switch (size) {
+ case 2:
+  SWAP(val[0], val[1], t);
+  break;
+ case 4:
+  SWAP(val[0], val[3], t);
+  SWAP(val[1], val[2], t);
+  break;
+ case 8: /* is there really any use for this? */
+  SWAP(val[0], val[7], t);
+  SWAP(val[1], val[6], t);
+  SWAP(val[2], val[5], t);
+  SWAP(val[3], val[4], t);
+  break;
+ }
+}
+
+/* Sticky memex flags: byte-reverse display mode and "don't read
+   memory" mode (for I/O spaces). */
+static int brev;
+static int mnoread;
+
+/* Help text for the 'm' (memory examine) command line flags. */
+static char *memex_help_string =
+    "Memory examine command usage:\n"
+    "m [addr] [flags] examine/change memory\n"
+    "  addr is optional.  will start where left off.\n"
+    "  flags may include chars from this set:\n"
+    "    b   modify by bytes (default)\n"
+    "    w   modify by words (2 byte)\n"
+    "    l   modify by longs (4 byte)\n"
+    "    d   modify by doubleword (8 byte)\n"
+    "    r   toggle reverse byte order mode\n"
+    "    n   do not read memory (for i/o spaces)\n"
+    "    .   ok to read (default)\n"
+    "NOTE: flags are saved as defaults\n"
+    "";
+
+/* Help text for the interactive memex subcommands. */
+static char *memex_subcmd_help_string =
+    "Memory examine subcommands:\n"
+    "  hexval   write this val to current location\n"
+    "  'string' write chars from string to this location\n"
+    "  '        increment address\n"
+    "  ^        decrement address\n"
+    "  /        increment addr by 0x10.  //=0x100, ///=0x1000, etc\n"
+    "  \\        decrement addr by 0x10.  \\\\=0x100, \\\\\\=0x1000, etc\n"
+    "  `        clear no-read flag\n"
+    "  ;        stay at this addr\n"
+    "  v        change to byte mode\n"
+    "  w        change to word (2 byte) mode\n"
+    "  l        change to long (4 byte) mode\n"
+    "  u        change to doubleword (8 byte) mode\n"
+    "  m addr   change current addr\n"
+    "  n        toggle no-read flag\n"
+    "  r        toggle byte reverse flag\n"
+    "  < count  back up count bytes\n"
+    "  > count  skip forward count bytes\n"
+    "  x        exit this mode\n"
+    "";
+
+/* Interactive memory examine/modify loop ('m' with no recognized
+   subcommand).  Parses display flags, then repeatedly shows the value
+   at adrs and accepts the subcommands documented in
+   memex_subcmd_help_string.  Uses the file-level globals adrs and
+   size (presumably the current access width -- verify). */
+void
+memex()
+{
+ int cmd, inc, i, nslash;
+ unsigned long n;
+ unsigned char val[16];
+
+ scanhex((void *)&adrs);
+ cmd = skipbl();
+ if (cmd == '?') {
+  printf(memex_help_string);
+  return;
+ } else {
+  termch = cmd;
+ }
+ last_cmd = "m\n";
+ /* consume the flag characters up to end of line */
+ while ((cmd = skipbl()) != '\n') {
+  switch( cmd ){
+  case 'b':	size = 1;	break;
+  case 'w':	size = 2;	break;
+  case 'l':	size = 4;	break;
+  case 'd':	size = 8;	break;
+  case 'r': 	brev = !brev;	break;
+  case 'n':	mnoread = 1;	break;
+  case '.':	mnoread = 0;	break;
+  }
+ }
+ if( size <= 0 )
+  size = 1;
+ else if( size > 8 )
+  size = 8;
+ for(;;){
+  if (!mnoread)
+   n = mread(adrs, val, size);
+  printf("%.16x%c", adrs, brev? 'r': ' ');
+  if (!mnoread) {
+   if (brev)
+    byterev(val, size);
+   putchar(' ');
+   for (i = 0; i < n; ++i)
+    printf("%.2x", val[i]);
+   /* unreadable bytes rendered as fault placeholders */
+   for (; i < size; ++i)
+    printf("%s", fault_chars[fault_type]);
+  }
+  putchar(' ');
+  inc = size;
+  nslash = 0;
+  for(;;){
+   if( scanhex(&n) ){
+    /* a hex value writes to the current address */
+    for (i = 0; i < size; ++i)
+     val[i] = n >> (i * 8);
+    if (!brev)
+     byterev(val, size);
+    mwrite(adrs, val, size);
+    inc = size;
+   }
+   cmd = skipbl();
+   if (cmd == '\n')
+    break;
+   inc = 0;
+   switch (cmd) {
+   case '\'':
+    /* quoted string: write successive characters */
+    for(;;){
+     n = inchar();
+     if( n == '\\' )
+      n = bsesc();
+     else if( n == '\'' )
+      break;
+     for (i = 0; i < size; ++i)
+      val[i] = n >> (i * 8);
+     if (!brev)
+      byterev(val, size);
+     mwrite(adrs, val, size);
+     adrs += size;
+    }
+    adrs -= size;
+    inc = size;
+    break;
+   case ',':
+    adrs += size;
+    break;
+   case '.':
+    mnoread = 0;
+    break;
+   case ';':
+    break;
+   case 'x':
+   case EOF:
+    scannl();
+    return;
+   case 'b':
+   case 'v':
+    size = 1;
+    break;
+   case 'w':
+    size = 2;
+    break;
+   case 'l':
+    size = 4;
+    break;
+   case 'u':
+    size = 8;
+    break;
+   case '^':
+    adrs -= size;
+    break;
+   /* NOTE(review): duplicate break below is unreachable dead code */
+    break;
+   case '/':
+    /* each extra '/' multiplies the step by 16 */
+    if (nslash > 0)
+     adrs -= 1 << nslash;
+    else
+     nslash = 0;
+    nslash += 4;
+    adrs += 1 << nslash;
+    break;
+   case '\\':
+    if (nslash < 0)
+     adrs += 1 << -nslash;
+    else
+     nslash = 0;
+    nslash -= 4;
+    adrs -= 1 << -nslash;
+    break;
+   case 'm':
+    scanhex((void *)&adrs);
+    break;
+   case 'n':
+    mnoread = 1;
+    break;
+   case 'r':
+    brev = !brev;
+    break;
+   case '<':
+    n = size;
+    scanhex(&n);
+    adrs -= n;
+    break;
+   case '>':
+    n = size;
+    scanhex(&n);
+    adrs += n;
+    break;
+   case '?':
+    printf(memex_subcmd_help_string);
+    break;
+   }
+  }
+  adrs += inc;
+ }
+}
+
+/* Read the character following a backslash and return the escape it
+   denotes (\n, \r, \b, \t); any other character stands for itself. */
+int
+bsesc()
+{
+ int ch = inchar();
+
+ if (ch == 'n')
+  return '\n';
+ if (ch == 'r')
+  return '\r';
+ if (ch == 'b')
+  return '\b';
+ if (ch == 't')
+  return '\t';
+ return ch;
+}
+
+/* Local isxdigit -- avoids pulling in ctype (note it shadows any
+   library definition). */
+#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
+			 || ('a' <= (c) && (c) <= 'f') \
+			 || ('A' <= (c) && (c) <= 'F'))
+/* 'd' command: hex+ascii dump ("d [addr] [count]") or instruction
+   disassembly ("di [addr] [count]").  A leading hex digit (other than
+   'f'/'d', which are commands) is pushed back as the address. */
+void
+dump()
+{
+ int c;
+
+ c = inchar();
+ if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
+  termch = c;
+ scanhex((void *)&adrs);
+ if( termch != '\n')
+  termch = 0;
+ if( c == 'i' ){
+  scanhex(&nidump);
+  if( nidump == 0 )
+   nidump = 16;
+  adrs += ppc_inst_dump(adrs, nidump);
+  last_cmd = "di\n";
+ } else {
+  scanhex(&ndump);
+  if( ndump == 0 )
+   ndump = 64;
+  prdump(adrs, ndump);
+  adrs += ndump;
+  last_cmd = "d\n";
+ }
+}
+
+/* Hex + printable-ascii dump of ndump bytes starting at adrs, 16 per
+   line.  Bytes that could not be read are shown with the fault
+   placeholder string; the dump stops at the first short read. */
+void
+prdump(unsigned long adrs, long ndump)
+{
+ long n, m, c, r, nr;
+ unsigned char temp[16];
+
+ for( n = ndump; n > 0; ){
+  printf("%.16lx", adrs);
+  putchar(' ');
+  r = n < 16? n: 16;
+  nr = mread(adrs, temp, r);
+  adrs += nr;
+  for( m = 0; m < r; ++m ){
+   if ((m & 7) == 0 && m > 0)
+    putchar(' ');
+   if( m < nr )
+    printf("%.2x", temp[m]);
+   else
+    printf("%s", fault_chars[fault_type]);
+  }
+  for(; m < 16; ++m )
+   printf("   ");
+  printf("  |");
+  for( m = 0; m < r; ++m ){
+   if( m < nr ){
+    c = temp[m];
+    putchar(' ' <= c && c <= '~'? c: '.');
+   } else
+    putchar(' ');
+  }
+  n -= r;
+  for(; m < 16; ++m )
+   putchar(' ');
+  printf("|\n");
+  if( nr < r )
+   break;
+ }
+}
+
+/* Disassemble count instructions starting at adr.  Runs of identical
+   instructions are collapsed to a single "...".  Returns the number
+   of bytes consumed; stops early on an unreadable word. */
+int
+ppc_inst_dump(unsigned long adr, long count)
+{
+ int nr, dotted;
+ unsigned long first_adr;
+ unsigned long inst, last_inst;
+ unsigned char val[4];
+
+ dotted = 0;
+ for (first_adr = adr; count > 0; --count, adr += 4){
+  nr = mread(adr, val, 4);
+  if( nr == 0 ){
+   const char *x = fault_chars[fault_type];
+   printf("%.16lx  %s%s%s%s\n", adr, x, x, x, x);
+   break;
+  }
+  inst = GETWORD(val);
+  if (adr > first_adr && inst == last_inst) {
+   if (!dotted) {
+    printf(" ...\n");
+    dotted = 1;
+   }
+   continue;
+  }
+  dotted = 0;
+  last_inst = inst;
+  printf("%.16lx  ", adr);
+  printf("%.8x\t", inst);
+  print_insn_big_powerpc(stdout, inst, adr);	/* always returns 4 */
+  printf("\n");
+ }
+ return adr - first_adr;
+}
+
+/* Print an address in 0x%lx form (presumably the address-printing
+   callback used by the disassembler -- verify against
+   print_insn_big_powerpc). */
+void
+print_address(unsigned long addr)
+{
+ printf("0x%lx", addr);
+}
+
+/*
+ * Memory operations - move, set, print differences
+ */
+static unsigned long mdest;		/* destination address */
+static unsigned long msrc;		/* source address */
+static unsigned long mval;		/* byte value to set memory to */
+static unsigned long mcount;		/* # bytes to affect */
+static unsigned long mdiffs;		/* max # differences to print */
+
+/* 'mm'/'ms'/'md' commands: move, set, or diff a memory range.  The
+   arguments (dest, src-or-value, count, and for 'md' the max number
+   of differences) are read from the command line.  Note the move/set
+   act on memory directly, without the fault protection of mwrite(). */
+void
+memops(int cmd)
+{
+ scanhex((void *)&mdest);
+ if( termch != '\n' )
+  termch = 0;
+ scanhex((void *)(cmd == 's'? &mval: &msrc));
+ if( termch != '\n' )
+  termch = 0;
+ scanhex((void *)&mcount);
+ switch( cmd ){
+ case 'm':
+  memmove((void *)mdest, (void *)msrc, mcount);
+  break;
+ case 's':
+  memset((void *)mdest, mval, mcount);
+  break;
+ case 'd':
+  if( termch != '\n' )
+   termch = 0;
+  scanhex((void *)&mdiffs);
+  memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs);
+  break;
+ }
+}
+
+/* Compare nb bytes at p1 and p2, printing the first maxpr differing
+   byte pairs (address and value from each side) and then a total
+   count if more differed.
+   NOTE(review): pointers are printed with %.16x rather than %lx --
+   relies on xmon's own printf; verify it prints the full 64 bits. */
+void
+memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr)
+{
+ unsigned n, prt;
+
+ prt = 0;
+ for( n = nb; n > 0; --n )
+  if( *p1++ != *p2++ )
+   if( ++prt <= maxpr )
+    printf("%.16x %.2x # %.16x %.2x\n", p1 - 1,
+           p1[-1], p2 - 1, p2[-1]);
+ if( prt > maxpr )
+  printf("Total of %d differences\n", prt);
+}
+
+/* Sticky arguments for 'ml': end address and comparison mask. */
+static unsigned mend;
+static unsigned mask;
+
+/* 'ml' command: scan words in [mdest, mend) for values matching mval
+   under mask, printing up to 10 matches.  Arguments are optional;
+   previous values are reused when omitted. */
+void
+memlocate()
+{
+ unsigned a, n;
+ unsigned char val[4];
+
+ last_cmd = "ml";
+ scanhex((void *)&mdest);
+ if (termch != '\n') {
+  termch = 0;
+  scanhex((void *)&mend);
+  if (termch != '\n') {
+   termch = 0;
+   scanhex((void *)&mval);
+   mask = ~0;
+   if (termch != '\n') termch = 0;
+   scanhex((void *)&mask);
+  }
+ }
+ n = 0;
+ for (a = mdest; a < mend; a += 4) {
+  if (mread(a, val, 4) == 4
+      && ((GETWORD(val) ^ mval) & mask) == 0) {
+   printf("%.16x:  %.16x\n", a, GETWORD(val));
+   if (++n >= 10)
+    break;
+  }
+ }
+}
+
+/* Sticky arguments for 'mz': probe stride and scan limit. */
+static unsigned long mskip = 0x1000;
+static unsigned long mlim = 0xffffffff;
+
+/* 'mz' command: probe one byte every mskip bytes from mdest up to
+   mlim and print the readable regions as "start .. end" ranges.
+   NOTE(review): the scan variable 'a' is unsigned (32-bit) while mlim
+   is unsigned long -- addresses above 4GB would wrap; verify intent. */
+void
+memzcan()
+{
+ unsigned char v;
+ unsigned a;
+ int ok, ook;
+
+ scanhex(&mdest);
+ if (termch != '\n') termch = 0;
+ scanhex(&mskip);
+ if (termch != '\n') termch = 0;
+ scanhex(&mlim);
+ ook = 0;
+ for (a = mdest; a < mlim; a += mskip) {
+  ok = mread(a, &v, 1);
+  if (ok && !ook) {
+   printf("%.8x .. ", a);
+   fflush(stdout);
+  } else if (!ok && ook)
+   printf("%.8x\n", a - mskip);
+  ook = ok;
+  /* stop if the next step would wrap around */
+  if (a + mskip < a)
+   break;
+ }
+ if (ook)
+  printf("%.8x\n", a - mskip);
+}
+
+/* Input scanning routines */
+/* Return the next input character that is not a space or tab,
+   consuming any pushed-back character in termch first. */
+int
+skipbl()
+{
+ int ch;
+
+ if (termch != 0) {
+  ch = termch;
+  termch = 0;
+ } else {
+  ch = inchar();
+ }
+ while (ch == ' ' || ch == '\t')
+  ch = inchar();
+ return ch;
+}
+
+int
+scanhex(vp)
+unsigned long *vp;
+{
+ int c, d;
+ unsigned long v;
+
+ c = skipbl();
+ d = hexdigit(c);
+ if( d == EOF ){
+ termch = c;
+ return 0;
+ }
+ v = 0;
+ do {
+ v = (v << 4) + d;
+ c = inchar();
+ d = hexdigit(c);
+ } while( d != EOF );
+ termch = c;
+ *vp = v;
+ return 1;
+}
+
+void
+scannl()
+{
+ int c;
+
+ c = termch;
+ termch = 0;
+ while( c != '\n' )
+ c = inchar();
+}
+
int
hexdigit(int c)
{
	/* Convert an ASCII hex digit (either case) to its value 0-15;
	   return EOF for any non-hex-digit character. */
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return 10 + (c - 'a');
	if (c >= 'A' && c <= 'F')
		return 10 + (c - 'A');
	return EOF;
}
+
+void
+getstring(char *s, int size)
+{
+ int c;
+
+ c = skipbl();
+ do {
+ if( size > 1 ){
+ *s++ = c;
+ --size;
+ }
+ c = inchar();
+ } while( c != ' ' && c != '\t' && c != '\n' );
+ termch = c;
+ *s = 0;
+}
+
/* Input line buffer; lineptr is the read cursor within it, or NULL
   when the buffer is empty and must be refilled. */
static char line[256];
static char *lineptr;
+
/* Discard any buffered input so the next inchar() reads a fresh line. */
void
flush_input()
{
	lineptr = NULL;
}
+
+int
+inchar()
+{
+ if (lineptr == NULL || *lineptr == 0) {
+ if (fgets(line, sizeof(line), stdin) == NULL) {
+ lineptr = NULL;
+ return EOF;
+ }
+ lineptr = line;
+ }
+ return *lineptr++;
+}
+
+void
+take_input(str)
+char *str;
+{
+ lineptr = str;
+}
+
+
+/* Starting at codeaddr scan forward for a tbtable and fill in the
+ given table. Return non-zero if successful at doing something.
+ */
+static int
+find_tb_table(unsigned long codeaddr, struct tbtable *tab)
+{
+ unsigned long codeaddr_max;
+ unsigned long tbtab_start;
+ int nr;
+ int instr;
+ int num_parms;
+
+ if (tab == NULL)
+ return 0;
+ memset(tab, 0, sizeof(tab));
+
+ /* Scan instructions starting at codeaddr for 128k max */
+ for (codeaddr_max = codeaddr + 128*1024*4;
+ codeaddr < codeaddr_max;
+ codeaddr += 4) {
+ nr = mread(codeaddr, &instr, 4);
+ if (nr != 4)
+ return 0; /* Bad read. Give up promptly. */
+ if (instr == 0) {
+ /* table should follow. */
+ int version;
+ unsigned long flags;
+ tbtab_start = codeaddr; /* save it to compute func start addr */
+ codeaddr += 4;
+ nr = mread(codeaddr, &flags, 8);
+ if (nr != 8)
+ return 0; /* Bad read or no tb table. */
+ tab->flags = flags;
+ version = (flags >> 56) & 0xff;
+ if (version != 0)
+ continue; /* No tb table here. */
+ /* Now, like the version, some of the flags are values
+ that are more conveniently extracted... */
+ tab->fp_saved = (flags >> 24) & 0x3f;
+ tab->gpr_saved = (flags >> 16) & 0x3f;
+ tab->fixedparms = (flags >> 8) & 0xff;
+ tab->floatparms = (flags >> 1) & 0x7f;
+ codeaddr += 8;
+ num_parms = tab->fixedparms + tab->floatparms;
+ if (num_parms) {
+ unsigned int parminfo;
+ int parm;
+ if (num_parms > 32)
+ return 1; /* incomplete */
+ nr = mread(codeaddr, &parminfo, 4);
+ if (nr != 4)
+ return 1; /* incomplete */
+ /* decode parminfo...32 bits.
+ A zero means fixed. A one means float and the
+ following bit determines single (0) or double (1).
+ */
+ for (parm = 0; parm < num_parms; parm++) {
+ if (parminfo & 0x80000000) {
+ parminfo <<= 1;
+ if (parminfo & 0x80000000)
+ tab->parminfo[parm] = TBTAB_PARMDFLOAT;
+ else
+ tab->parminfo[parm] = TBTAB_PARMSFLOAT;
+ } else {
+ tab->parminfo[parm] = TBTAB_PARMFIXED;
+ }
+ parminfo <<= 1;
+ }
+ codeaddr += 4;
+ }
+ if (flags & TBTAB_FLAGSHASTBOFF) {
+ nr = mread(codeaddr, &tab->tb_offset, 4);
+ if (nr != 4)
+ return 1; /* incomplete */
+ if (tab->tb_offset > 0) {
+ tab->funcstart = tbtab_start - tab->tb_offset;
+ }
+ codeaddr += 4;
+ }
+ /* hand_mask appears to be always be omitted. */
+ if (flags & TBTAB_FLAGSHASCTL) {
+ /* Assume this will never happen for C or asm */
+ return 1; /* incomplete */
+ }
+ if (flags & TBTAB_FLAGSNAMEPRESENT) {
+ short namlen;
+ nr = mread(codeaddr, &namlen, 2);
+ if (nr != 2)
+ return 1; /* incomplete */
+ if (namlen >= sizeof(tab->name))
+ namlen = sizeof(tab->name)-1;
+ codeaddr += 2;
+ nr = mread(codeaddr, tab->name, namlen);
+ tab->name[namlen] = '\0';
+ codeaddr += namlen;
+ }
+ return 1;
+ }
+ }
+ return 0; /* hit max...sorry. */
+}
+
/*
 * Translate an effective address and dump everything involved in its
 * mapping: the software (Linux) PTE, both hardware hash-table groups
 * (primary and secondary), and the hardware segment table entries.
 */
void
mem_translate()
{
	int c;
	unsigned long ea, va, vsid, vpn, page, hpteg_slot_primary, hpteg_slot_secondary, primary_hash, i, *steg, esid, stabl;
	HPTE * hpte;
	struct mm_struct * mm;
	pte_t *ptep = NULL;
	void * pgdir;

	/* Push back the first character so scanhex() sees it, unless it
	   is a sub-command modifier letter. */
	c = inchar();
	if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
		termch = c;
	scanhex((void *)&ea);

	if ((ea >= KRANGE_START) && (ea <= (KRANGE_START + (1UL<<60)))) {
		/* Kernel linear mapping: no Linux page-table walk needed. */
		ptep = 0;
		vsid = get_kernel_vsid(ea);
		va = ( vsid << 28 ) | ( ea & 0x0fffffff );
	} else {
		/* NOTE(review): if ea falls in none of the three ranges
		   below, mm and vsid are used uninitialized — confirm
		   only vmalloc/ioremap/user addresses reach this path. */
		// if in vmalloc range, use the vmalloc page directory
		if ( ( ea >= VMALLOC_START ) && ( ea <= VMALLOC_END ) ) {
			mm = &init_mm;
			vsid = get_kernel_vsid( ea );
		}
		// if in ioremap range, use the ioremap page directory
		else if ( ( ea >= IMALLOC_START ) && ( ea <= IMALLOC_END ) ) {
			mm = &ioremap_mm;
			vsid = get_kernel_vsid( ea );
		}
		// if in user range, use the current task's page directory
		else if ( ( ea >= USER_START ) && ( ea <= USER_END ) ) {
			mm = current->mm;
			vsid = get_vsid(mm->context, ea );
		}
		pgdir = mm->pgd;
		va = ( vsid << 28 ) | ( ea & 0x0fffffff );
		ptep = find_linux_pte( pgdir, ea );
	}

	/* Virtual page number, page index within the segment, ESID. */
	vpn = ((vsid << 28) | (((ea) & 0xFFFF000))) >> 12;
	page = vpn & 0xffff;
	esid = (ea >> 28) & 0xFFFFFFFFF;

	// Search the primary group for an available slot
	primary_hash = ( vsid & 0x7fffffffff ) ^ page;
	hpteg_slot_primary = ( primary_hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;
	hpteg_slot_secondary = ( ~primary_hash & htab_data.htab_hash_mask ) * HPTES_PER_GROUP;

	printf("ea : %.16lx\n", ea);
	printf("esid : %.16lx\n", esid);
	printf("vsid : %.16lx\n", vsid);

	printf("\nSoftware Page Table\n-------------------\n");
	printf("ptep : %.16lx\n", ((unsigned long *)ptep));
	if(ptep) {
		printf("*ptep : %.16lx\n", *((unsigned long *)ptep));
	}

	hpte = htab_data.htab + hpteg_slot_primary;
	printf("\nHardware Page Table\n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("slot primary : %.16lx\n", hpteg_slot_primary);
	printf("slot secondary : %.16lx\n", hpteg_slot_secondary);
	printf("\nPrimary Group\n");
	/* Dump the valid entries of the 8-slot primary hash group. */
	for (i=0; i<8; ++i) {
		if ( hpte->dw0.dw0.v != 0 ) {
			printf("%d: (hpte)%.16lx %.16lx\n", i, hpte->dw0.dword0, hpte->dw1.dword1);
			printf(" vsid: %.13lx api: %.2lx hash: %.1lx\n",
			       (hpte->dw0.dw0.avpn)>>5,
			       (hpte->dw0.dw0.avpn) & 0x1f,
			       (hpte->dw0.dw0.h));
			printf(" rpn: %.13lx \n", (hpte->dw1.dw1.rpn));
			printf(" pp: %.1lx \n",
			       ((hpte->dw1.dw1.pp0)<<2)|(hpte->dw1.dw1.pp));
			printf(" wimgn: %.2lx reference: %.1lx change: %.1lx\n",
			       ((hpte->dw1.dw1.w)<<4)|
			       ((hpte->dw1.dw1.i)<<3)|
			       ((hpte->dw1.dw1.m)<<2)|
			       ((hpte->dw1.dw1.g)<<1)|
			       ((hpte->dw1.dw1.n)<<0),
			       hpte->dw1.dw1.r, hpte->dw1.dw1.c);
		}
		hpte++;
	}

	printf("\nSecondary Group\n");
	// Search the secondary group
	hpte = htab_data.htab + hpteg_slot_secondary;
	for (i=0; i<8; ++i) {
		if(hpte->dw0.dw0.v) {
			printf("%d: (hpte)%.16lx %.16lx\n", i, hpte->dw0.dword0, hpte->dw1.dword1);
			printf(" vsid: %.13lx api: %.2lx hash: %.1lx\n",
			       (hpte->dw0.dw0.avpn)>>5,
			       (hpte->dw0.dw0.avpn) & 0x1f,
			       (hpte->dw0.dw0.h));
			printf(" rpn: %.13lx \n", (hpte->dw1.dw1.rpn));
			printf(" pp: %.1lx \n",
			       ((hpte->dw1.dw1.pp0)<<2)|(hpte->dw1.dw1.pp));
			printf(" wimgn: %.2lx reference: %.1lx change: %.1lx\n",
			       ((hpte->dw1.dw1.w)<<4)|
			       ((hpte->dw1.dw1.i)<<3)|
			       ((hpte->dw1.dw1.m)<<2)|
			       ((hpte->dw1.dw1.g)<<1)|
			       ((hpte->dw1.dw1.n)<<0),
			       hpte->dw1.dw1.r, hpte->dw1.dw1.c);
		}
		hpte++;
	}

	printf("\nHardware Segment Table\n-----------------------\n");
	/* The segment table base comes from the ASR register; the low
	   bit is cleared (it is a valid/control bit, not an address). */
	stabl = (unsigned long)(KERNELBASE+(_ASR&0xFFFFFFFFFFFFFFFE));
	steg = (unsigned long *)((stabl) | ((esid & 0x1f) << 7));

	printf("stab base : %.16lx\n", stabl);
	printf("slot : %.16lx\n", steg);

	/* Each STE is two doublewords; dump the 8-entry group. */
	for (i=0; i<8; ++i) {
		printf("%d: (ste) %.16lx %.16lx\n", i,
		       *((unsigned long *)(steg+i*2)),*((unsigned long *)(steg+i*2+1)) );
	}
}
+
/*
 * Consistency check of the hardware hash page table: for every valid
 * HPTE report (a) duplicate real page numbers — two HPTEs mapping the
 * same physical page — and (b) RPNs above the assumed maximum.
 */
void mem_check()
{
	unsigned long htab_size_bytes;
	unsigned long htab_end;
	unsigned long last_rpn;
	HPTE *hpte1, *hpte2;

	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;
	// last_rpn = (naca->physicalMemorySize-1) >> PAGE_SHIFT;
	/* NOTE(review): hard-coded 4GB-worth of 4K pages instead of the
	   commented-out derivation above — confirm still intended. */
	last_rpn = 0xfffff;

	printf("\nHardware Page Table Check\n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("htab size : %.16lx\n", htab_size_bytes);

#if 1
	/* O(n^2) pairwise scan — acceptable for a debugger command. */
	for(hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
		if ( hpte1->dw0.dw0.v != 0 ) {
			if ( hpte1->dw1.dw1.rpn <= last_rpn ) {
				for(hpte2 = hpte1+1; hpte2 < (HPTE *)htab_end; hpte2++) {
					if ( hpte2->dw0.dw0.v != 0 ) {
						if(hpte1->dw1.dw1.rpn == hpte2->dw1.dw1.rpn) {
							printf(" Duplicate rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
							printf(" hpte1: %16.16lx *hpte1: %16.16lx %16.16lx\n",
							       hpte1, hpte1->dw0.dword0, hpte1->dw1.dword1);
							printf(" hpte2: %16.16lx *hpte2: %16.16lx %16.16lx\n",
							       hpte2, hpte2->dw0.dword0, hpte2->dw1.dword1);
						}
					}
				}
			} else {
				printf(" Bogus rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
				printf(" hpte: %16.16lx *hpte: %16.16lx %16.16lx\n",
				       hpte1, hpte1->dw0.dword0, hpte1->dw1.dword1);
			}
		}
	}
#endif
	printf("\nDone -------------------\n");
}
+
/*
 * Given a physical address, find and print every valid HPTE whose
 * real page number maps that page.
 */
void mem_find_real()
{
	unsigned long htab_size_bytes;
	unsigned long htab_end;
	unsigned long last_rpn;
	HPTE *hpte1;
	unsigned long pa, rpn;
	int c;

	/* Push back the first character for scanhex() unless it is a
	   sub-command modifier letter. */
	c = inchar();
	if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
		termch = c;
	scanhex((void *)&pa);
	rpn = pa >> 12;		/* physical address -> 4K page number */

	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;
	// last_rpn = (naca->physicalMemorySize-1) >> PAGE_SHIFT;
	last_rpn = 0xfffff;

	printf("\nMem Find RPN\n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("htab size : %.16lx\n", htab_size_bytes);

	/* Linear scan over every HPTE in the hash table. */
	for(hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
		if ( hpte1->dw0.dw0.v != 0 ) {
			if ( hpte1->dw1.dw1.rpn == rpn ) {
				printf(" Found rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
				printf(" hpte: %16.16lx *hpte1: %16.16lx %16.16lx\n",
				       hpte1, hpte1->dw0.dword0, hpte1->dw1.dword1);
			}
		}
	}
	printf("\nDone -------------------\n");
}
+
/*
 * Given a VSID, find and print every valid HPTE belonging to that
 * virtual segment (matched against the upper bits of the AVPN).
 */
void mem_find_vsid()
{
	unsigned long htab_size_bytes;
	unsigned long htab_end;
	HPTE *hpte1;
	unsigned long vsid;
	int c;

	/* Push back the first character for scanhex() unless it is a
	   sub-command modifier letter. */
	c = inchar();
	if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
		termch = c;
	scanhex((void *)&vsid);

	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;

	printf("\nMem Find VSID\n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("htab size : %.16lx\n", htab_size_bytes);

	/* Linear scan over every HPTE in the hash table. */
	for(hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
		if ( hpte1->dw0.dw0.v != 0 ) {
			if ( ((hpte1->dw0.dw0.avpn)>>5) == vsid ) {
				printf(" Found vsid: %.16lx \n", ((hpte1->dw0.dw0.avpn) >> 5));
				printf(" hpte: %16.16lx *hpte1: %16.16lx %16.16lx\n",
				       hpte1, hpte1->dw0.dword0, hpte1->dw1.dword1);
			}
		}
	}
	printf("\nDone -------------------\n");
}
+
+void mem_map_check_slab()
+{
+ int i, slab_count;
+
+ i = max_mapnr;
+ slab_count = 0;
+
+ while (i-- > 0) {
+ if (PageSlab(mem_map+i)){
+ printf(" slab entry - mem_map entry =%p \n", mem_map+i);
+ slab_count ++;
+ }
+ }
+
+ printf(" count of pages for slab = %d \n", slab_count);
+}
+
+void mem_map_lock_pages()
+{
+ int i, lock_count;
+
+ i = max_mapnr;
+ lock_count = 0;
+
+ while (i-- > 0) {
+ if (PageLocked(mem_map+i)){
+ printf(" locked entry - mem_map entry =%p \n", mem_map+i);
+ lock_count ++;
+ }
+ }
+
+ printf(" count of locked pages = %d \n", lock_count);
+}
+
+
+
/*
 * Sanity-check the page hash chain pointers in mem_map: every
 * non-reserved page's next_hash should point into the kernel region
 * and should never equal KERNELBASE; reserved pages are checked for
 * a negative reference count instead.
 */
void mem_map_check_hash()
{
	int i = max_mapnr;

	while (i-- > 0) {
		/* skip the reserved */
		if (!PageReserved(mem_map+i)) {
			if (((mem_map+i)->next_hash) != NULL) {
				/* pointer outside the 0xC... kernel region */
				if ( REGION_ID((mem_map+i)->next_hash) != KERNEL_REGION_ID ) {
					printf(" mem_map check hash - non c0 entry - "
					       "address/value = %p %lx\n", mem_map+i,(mem_map+i)->next_hash);
				}
				/* a chain pointer of exactly KERNELBASE is bogus */
				if ((unsigned long)((mem_map+i)->next_hash) == KERNELBASE){
					printf(" mem_map check hash - 0x%lx entry = %p \n",
					       KERNELBASE, mem_map+i);
				}
			}
		} else {
			if (page_count(mem_map+i) < 0) {
				printf(" reserved page with negative count- entry = %lx \n", mem_map+i);
			}
		}
	}
	printf(" mem_map check hash completed \n");
}
+
/*
 * Like mem_check(), but only report physical pages mapped by more
 * than 5 valid HPTEs, then dump the kernel VSIDs and each task's
 * first few user-segment VSIDs to help identify the owners of the
 * duplicate mappings.
 */
void mem_check_dup_rpn ()
{
	unsigned long htab_size_bytes;
	unsigned long htab_end;
	unsigned long last_rpn;
	HPTE *hpte1, *hpte2;
	int dup_count;
	struct task_struct *p;
	unsigned long kernel_vsid_c0,kernel_vsid_c1,kernel_vsid_c2,kernel_vsid_c3;
	unsigned long kernel_vsid_c4,kernel_vsid_c5,kernel_vsid_d,kernel_vsid_e;
	unsigned long kernel_vsid_f;
	unsigned long vsid0,vsid1,vsidB,vsid2;

	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;
	// last_rpn = (naca->physicalMemorySize-1) >> PAGE_SHIFT;
	last_rpn = 0xfffff;

	printf("\nHardware Page Table Check\n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("htab size : %.16lx\n", htab_size_bytes);


	for(hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
		if ( hpte1->dw0.dw0.v != 0 ) {
			if ( hpte1->dw1.dw1.rpn <= last_rpn ) {
				/* count the other valid HPTEs mapping this rpn */
				dup_count = 0;
				for(hpte2 = hpte1+1; hpte2 < (HPTE *)htab_end; hpte2++) {
					if ( hpte2->dw0.dw0.v != 0 ) {
						if(hpte1->dw1.dw1.rpn == hpte2->dw1.dw1.rpn) {
							dup_count++;
						}
					}
				}
				/* only heavily-shared pages are interesting */
				if(dup_count > 5) {
					printf(" Duplicate rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
					printf(" mem map array entry %p count = %d \n",
					       (mem_map+(hpte1->dw1.dw1.rpn)), (mem_map+(hpte1->dw1.dw1.rpn))->count);
					for(hpte2 = hpte1+1; hpte2 < (HPTE *)htab_end; hpte2++) {
						if ( hpte2->dw0.dw0.v != 0 ) {
							if(hpte1->dw1.dw1.rpn == hpte2->dw1.dw1.rpn) {
								printf(" hpte2: %16.16lx *hpte2: %16.16lx %16.16lx\n",
								       hpte2, hpte2->dw0.dword0, hpte2->dw1.dword1);
							}
						}
					}
				}
			} else {
				printf(" Bogus rpn: %.13lx \n", (hpte1->dw1.dw1.rpn));
				printf(" hpte: %16.16lx *hpte: %16.16lx %16.16lx\n",
				       hpte1, hpte1->dw0.dword0, hpte1->dw1.dword1);
			}
		}
		/* let the user break out of the O(n^2) scan */
		if (xmon_interrupted())
			return;
	}



	// print the kernel vsids
	kernel_vsid_c0 = get_kernel_vsid(0xC000000000000000);
	kernel_vsid_c1 = get_kernel_vsid(0xC000000010000000);
	kernel_vsid_c2 = get_kernel_vsid(0xC000000020000000);
	kernel_vsid_c3 = get_kernel_vsid(0xC000000030000000);
	kernel_vsid_c4 = get_kernel_vsid(0xC000000040000000);
	kernel_vsid_c5 = get_kernel_vsid(0xC000000050000000);
	kernel_vsid_d = get_kernel_vsid(0xD000000000000000);
	kernel_vsid_e = get_kernel_vsid(0xE000000000000000);
	kernel_vsid_f = get_kernel_vsid(0xF000000000000000);

	printf(" kernel vsid - seg c0 = %lx\n", kernel_vsid_c0 );
	printf(" kernel vsid - seg c1 = %lx\n", kernel_vsid_c1 );
	printf(" kernel vsid - seg c2 = %lx\n", kernel_vsid_c2 );
	printf(" kernel vsid - seg c3 = %lx\n", kernel_vsid_c3 );
	printf(" kernel vsid - seg c4 = %lx\n", kernel_vsid_c4 );
	printf(" kernel vsid - seg c5 = %lx\n", kernel_vsid_c5 );
	printf(" kernel vsid - seg d = %lx\n", kernel_vsid_d );
	printf(" kernel vsid - seg e = %lx\n", kernel_vsid_e );
	printf(" kernel vsid - seg f = %lx\n", kernel_vsid_f );


	// print a list of valid vsids for the tasks
	read_lock(&tasklist_lock);
	for_each_task(p)
		if(p->mm) {
			struct mm_struct *mm = p->mm;
			printf(" task = %p mm = %lx pgd %lx\n",
			       p, mm, mm->pgd);
			vsid0 = get_vsid( mm->context, 0 );
			vsid1 = get_vsid( mm->context, 0x10000000 );
			vsid2 = get_vsid( mm->context, 0x20000000 );
			vsidB = get_vsid( mm->context, 0xB0000000 );
			printf(" context = %lx vsid seg 0 = %lx\n", mm->context, vsid0 );
			printf(" vsid seg 1 = %lx\n", vsid1 );
			printf(" vsid seg 2 = %lx\n", vsid2 );
			/* NOTE(review): label says "seg 2" but prints vsidB
			   (segment B) — looks like a copy-paste in the text. */
			printf(" vsid seg 2 = %lx\n", vsidB );

			printf("\n");
		};
	read_unlock(&tasklist_lock);

	printf("\nDone -------------------\n");
}
+
+
+
/*
 * Classify every valid HPTE by its VSID: kernel segments, user
 * segments of some running task, bogus RPN, or unknown owner.
 * Prints totals plus details of any entry whose VSID matches no
 * known kernel or task segment.
 */
void mem_check_pagetable_vsids ()
{
	unsigned long htab_size_bytes;
	unsigned long htab_end;
	unsigned long last_rpn;
	struct task_struct *p;
	unsigned long valid_table_count,invalid_table_count,bogus_rpn_count;
	int found;
	unsigned long user_address_table_count,kernel_page_table_count;
	unsigned long pt_vsid;
	HPTE *hpte1;


	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;
	// last_rpn = (naca->physicalMemorySize-1) >> PAGE_SHIFT;
	last_rpn = 0xfffff;

	printf("\nHardware Page Table Check\n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("htab size : %.16lx\n", htab_size_bytes);

	valid_table_count = 0;
	invalid_table_count = 0;
	bogus_rpn_count = 0;
	user_address_table_count = 0;
	kernel_page_table_count = 0;
	for(hpte1 = htab_data.htab; hpte1 < (HPTE *)htab_end; hpte1++) {
		if ( hpte1->dw0.dw0.v != 0 ) {
			valid_table_count++;
			if ( hpte1->dw1.dw1.rpn <= last_rpn ) {
				/* VSID lives in the upper bits of the AVPN */
				pt_vsid = (hpte1->dw0.dw0.avpn) >> 5;
				/* NOTE(review): bitwise '|' rather than '||'
				   throughout — result is the same for these
				   0/1 comparisons but all arms are evaluated. */
				if ((pt_vsid == get_kernel_vsid(0xC000000000000000)) |
				    (pt_vsid == get_kernel_vsid(0xC000000010000000)) |
				    (pt_vsid == get_kernel_vsid(0xC000000020000000)) |
				    (pt_vsid == get_kernel_vsid(0xC000000030000000)) |
				    (pt_vsid == get_kernel_vsid(0xC000000040000000)) |
				    (pt_vsid == get_kernel_vsid(0xC000000050000000)) |
				    (pt_vsid == get_kernel_vsid(0xD000000000000000)) |
				    (pt_vsid == get_kernel_vsid(0xE000000000000000)) |
				    (pt_vsid == get_kernel_vsid(0xF000000000000000)) ) {
					kernel_page_table_count ++;
				} else {
					/* not kernel: search every task's first
					   12 user segments for a match */
					read_lock(&tasklist_lock);
					found = 0;
					for_each_task(p) {
						if(p->mm && (found == 0)) {
							struct mm_struct *mm = p->mm;

							if ((pt_vsid == get_vsid( mm->context, 0 )) |
							    (pt_vsid == get_vsid( mm->context, 0x10000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x20000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x30000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x40000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x50000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x60000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x70000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x80000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0x90000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0xA0000000 )) |
							    (pt_vsid == get_vsid( mm->context, 0xB0000000 ))) {
								user_address_table_count ++;
								found = 1;
							}
						}
					}
					read_unlock(&tasklist_lock);
					if (found == 0)
					{
						/* orphan entry: no kernel or task segment owns it */
						printf(" vsid not found vsid = %lx, hpte = %p \n",
						       pt_vsid,hpte1);
						printf(" rpn in entry = %lx \n", hpte1->dw1.dw1.rpn);
						printf(" mem map address = %lx \n", mem_map + (hpte1->dw1.dw1.rpn));

					} else // found
					{
					}

				} // good rpn

			} else {
				bogus_rpn_count ++;
			}
		} else {
			invalid_table_count++;
		}
	}


	printf(" page table valid counts - valid entries = %lx invalid entries = %lx \n",
	       valid_table_count, invalid_table_count);

	printf(" bogus rpn entries ( probably io) = %lx \n", bogus_rpn_count);



	printf(" page table counts - kernel entries = %lx user entries = %lx \n",
	       kernel_page_table_count, user_address_table_count);

	printf("\nDone -------------------\n");

}
+
+
/*
 * Walk the hash page table one 8-entry PTEG at a time, print details
 * of every completely-full group (all 8 entries valid), and finally
 * print a histogram of groups by occupancy (sizes 1..8).
 */
void mem_check_full_group()
{
	unsigned long htab_size_bytes;
	unsigned count;
	/* 9 buckets: index = number of valid entries in a group (0..8) */
	unsigned count_array[] = {0,0,0,0,0,0,0,0,0};
	unsigned i;
	unsigned long htab_end;
	HPTE *hpte1, *hpte2, *hpte3;
	u64 rpn = 0;

	htab_size_bytes = htab_data.htab_num_ptegs * 128; // 128B / PTEG
	htab_end = (unsigned long)htab_data.htab + htab_size_bytes;

	printf("\nHardware Page Find full groups \n-------------------\n");
	printf("htab base : %.16lx\n", htab_data.htab);
	printf("htab size : %.16lx\n", htab_size_bytes);

	/* step by 8 HPTEs = one PTEG per iteration */
	for (hpte1 = htab_data.htab; (unsigned long)hpte1 < htab_end; hpte1= hpte1 + 8)
	{
		/* first pass: count valid entries in this group */
		count = 0;
		hpte2 = hpte1;
		for (i=0; i<8; ++i)
		{
			if ( hpte2->dw0.dw0.v != 0 )
			{
				count++;
			}
			hpte2++;
		}
		if (count == 8 )
		{
			/* second pass: dump every entry of the full group */
			printf(" full group starting with entry %lx \n", hpte1);
			hpte3 = hpte1;
			for (i=0; i<8; ++i)
			{
				if ( hpte3->dw0.dw0.v != 0 )
				{
					printf(" entry number %d \n",i);
					printf(" vsid: %.13lx api: %.2lx hash: %.1lx\n",
					       (hpte3->dw0.dw0.avpn)>>5,
					       (hpte3->dw0.dw0.avpn) & 0x1f,
					       (hpte3->dw0.dw0.h));
					printf(" rpn: %.13lx \n", (hpte3->dw1.dw1.rpn));
					// Dump out the memmap array entry address, corresponding virtual address, and reference count.
					rpn = hpte3->dw1.dw1.rpn;
					printf(" mem_map+rpn=%p, virtual@=%p, count=%lx \n", mem_map+rpn, (mem_map+rpn)->virtual, (mem_map+rpn)->count);
				}
				hpte3++;
			}
			if (xmon_interrupted())
				return;
		}

		count_array[count]++;
	}
	/* histogram; bucket 0 (empty groups) is deliberately not printed */
	for (i=1; i<9; ++i)
	{
		printf(" group count for size %i = %lx \n", i, count_array[i]);
	}

	printf("\nDone -------------------\n");
}
+
+
+
/*
 * Print a multi-line diagnostic dump of one task: name/pid/state,
 * saved PC, family relationships, pending/blocked signals, and the
 * thread struct including saved registers (when available).
 */
static void show_task(struct task_struct * p)
{
	/* unsigned long free = 0; --Unused */
	int state;
	/* indices correspond to the task state bit positions */
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printf("--------------------------------------------------------------------------\n");
	printf("%-11.11s pid: %5.5lx ppid: %5.5lx state: ",
	       p->comm, p->pid, p->p_pptr->pid);
	/* p->state is a bit mask; translate to index (0 = runnable) */
	state = p->state ? ffz(~p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printf(stat_nam[state]);
	else
		printf(" ");
	if (p == current)
		printf(" pc: current task ");
	else
		printf(" pc: 0x%16.16lx ", thread_saved_pc(&p->thread));

	if (p->p_cptr)
		printf("%5d ", p->p_cptr->pid);	/* youngest child */
	else
		printf(" ");
	/* no mm => kernel thread borrowing the last mm (lazy TLB) */
	if (!p->mm)
		printf(" (L-TLB) ");
	else
		printf(" (NOTLB) ");
	if (p->p_ysptr)
		printf("%7d", p->p_ysptr->pid);	/* younger sibling */
	else
		printf(" ");
	if (p->p_osptr)
		printf(" %5d\n", p->p_osptr->pid);	/* older sibling */
	else
		printf("\n");

	{
		/* render the pending/blocked signal sets as hex strings */
		struct sigqueue *q;
		char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];

		render_sigset_t(&p->pending.signal, s);
		render_sigset_t(&p->blocked, b);
		printf(" sig: %d %s %s :", signal_pending(p), s, b);
		for (q = p->pending.head; q ; q = q->next)
			printf(" %d", q->info.si_signo);
		printf(" X\n");
	}

	printf(" pers : %lx current : %lx",
	       p->personality, p);
	printf("\n");

	printf(" thread : 0x%16.16lx ksp : 0x%16.16lx\n",
	       &(p->thread), (p->thread.ksp));
	printf(" pgdir : 0x%16.16lx\n", (p->thread.pgdir));
	printf(" regs : 0x%16.16lx sysc : 0x%16.16lx\n",
	       (p->thread.regs), (p->thread.last_syscall));
	/* user register state is only present for tasks that entered
	   the kernel from user space */
	if(p->thread.regs) {
		printf(" nip : 0x%16.16lx msr : 0x%16.16lx\n",
		       ((p->thread.regs)->nip), ((p->thread.regs)->msr));
		printf(" ctr : 0x%16.16lx link : 0x%16.16lx\n",
		       ((p->thread.regs)->ctr), ((p->thread.regs)->link));
		printf(" xer : 0x%16.16lx ccr : 0x%16.16lx\n",
		       ((p->thread.regs)->xer), ((p->thread.regs)->ccr));
		printf(" trap : 0x%16.16lx\n",
		       ((p->thread.regs)->trap));
		printf(" dar : 0x%16.16lx dsis : 0x%16.16lx\n",
		       ((p->thread.regs)->dar), ((p->thread.regs)->dsisr));
		printf(" rslt : 0x%16.16lx org3 : 0x%16.16lx\n",
		       ((p->thread.regs)->result), (p->thread.regs->orig_gpr3));
	}

	if(p->mm) {
		struct mm_struct *mm = p->mm;
		printf(" mm : 0x%16.16lx pgd : 0x%16.16lx\n",
		       mm, mm->pgd);
		printf(" context: 0x%16.16lx mmap : 0x%16.16lx\n",
		       mm->context, mm->mmap);

		printf("\n");
	}

}
+
/*
 * Print a process listing: a column header followed by one
 * show_task() dump per task, under the tasklist read lock.
 */
static void xmon_show_state(void)
{
	struct task_struct *p;

	/* header layout differs between 32- and 64-bit builds */
#if (BITS_PER_LONG == 32)
	printf("\n"
	       " free sibling\n");
	printf("task name st PC stack pid father child younger older\n");
#else
	printf("\n"
	       " free sibling\n");
	printf(" task PC stack pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p)
		show_task(p);
	read_unlock(&tasklist_lock);
}
+
/*
 * Show or modify the PPCDBG trace bit mask in naca->debug_switch.
 * With no argument, the on/off state of every flag is listed.
 * Arguments are hex flag numbers, each optionally prefixed with
 * '+' (enable) or '-' (disable); a bare '+' or '-' followed by
 * whitespace turns every flag on or off at once.
 */
static void debug_trace(void) {
	unsigned long val, cmd, on;

	cmd = skipbl();
	if (cmd == '\n') {
		/* show current state */
		unsigned long i;
		printf("naca->debug_switch = 0x%lx\n", naca->debug_switch);
		for (i = 0; i < PPCDBG_NUM_FLAGS ;i++) {
			on = PPCDBG_BITVAL(i) & naca->debug_switch;
			printf("%02x %s %12s ", i, on ? "on " : "off", trace_names[i] ? trace_names[i] : "");
			/* three flags per output line */
			if (((i+1) % 3) == 0)
				printf("\n");
		}
		printf("\n");
		return;
	}
	while (cmd != '\n') {
		on = 1; /* default if no sign given */
		while (cmd == '+' || cmd == '-') {
			on = (cmd == '+');
			cmd = inchar();
			if (cmd == ' ' || cmd == '\n') { /* Turn on or off based on + or - */
				naca->debug_switch = on ? PPCDBG_ALL:PPCDBG_NONE;
				printf("Setting all values to %s...\n", on ? "on" : "off");
				if (cmd == '\n') return;
				else cmd = skipbl();
			}
			else
				termch = cmd;
		}
		termch = cmd; /* not +/- ... let scanhex see it */
		scanhex((void *)&val);
		if (val >= 64) {
			/* only 64 bits available in debug_switch */
			printf("Value %x out of range:\n", val);
			return;
		}
		if (on) {
			naca->debug_switch |= PPCDBG_BITVAL(val);
			printf("enable debug %x %s\n", val, trace_names[val] ? trace_names[val] : "");
		} else {
			naca->debug_switch &= ~PPCDBG_BITVAL(val);
			printf("disable debug %x %s\n", val, trace_names[val] ? trace_names[val] : "");
		}
		cmd = skipbl();
	}
}
--- /dev/null
+#ifndef _NACA_H
+#define _NACA_H
+
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+
/*
 * System-wide control block: boot-time facts about the machine
 * (cache geometry, memory size, processor count) plus pointers used
 * during early boot.  Shared by pSeries and iSeries code.
 */
struct Naca
{
	void *xItVpdAreas;
	void *xRamDisk;
	u64 xRamDiskSize; /* In pages */
	struct Paca *paca; /* Ptr to an array of pacas */
	u64 debug_switch; /* Bits to control debug printing */
	u16 processorCount; /* # of physical processors */
	u16 dCacheL1LineSize; /* Line size of L1 DCache in bytes */
	u16 dCacheL1LogLineSize; /* Log-2 of DCache line size */
	u16 dCacheL1LinesPerPage; /* DCache lines per page */
	u16 iCacheL1LineSize; /* Line size of L1 ICache in bytes */
	u16 iCacheL1LogLineSize; /* Log-2 of ICache line size */
	u16 iCacheL1LinesPerPage; /* ICache lines per page */
	u16 slb_size; /* SLB size in entries */
	u64 physicalMemorySize; /* Size of real memory in bytes */
	u64 pftSize; /* Log base 2 of page table size */
	u64 serialPortAddr; /* Physical address of serial port */
	u8 interrupt_controller; /* Type of interrupt controller */
	u8 resv0[6]; /* Padding */
};
+
+extern struct Naca *naca;
+
+#endif /* _NACA_H */
--- /dev/null
+#ifndef _PPC64_PACA_H
+#define _PPC64_PACA_H
+
+/*============================================================================
+ * Header File Id
+ * Name______________: Paca.H
+ *
+ * Description_______:
+ *
+ * This control block defines the PACA which defines the processor
+ * specific data for each logical processor on the system.
+ * There are some pointers defined that are utilized by PLIC.
+ *
+ * C 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/types.h>
+
+#define N_EXC_STACK 2
+
+/*-----------------------------------------------------------------------------
+ * Other Includes
+ *-----------------------------------------------------------------------------
+ */
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/iSeries/ItLpRegSave.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/rtas.h>
+#include <asm/mmu.h>
+#include <asm/processor.h>
+
+/* A Paca entry is required for each logical processor. On systems
+ * that support hardware multi-threading, this is equal to twice the
+ * number of physical processors. On LPAR systems, we are required
+ * to have space for the maximum number of logical processors we
+ * could ever possibly have. Currently, we are limited to allocating
+ * 24 processors to a partition which gives 48 logical processors on
+ * an HMT box. Therefore, we reserve this many Paca entries.
+ */
#define maxProcessors 24
/* Parenthesized: the previous body "maxProcessors * 2" expanded as a
 * bare product, so e.g. "x / maxPacas" would have parsed as
 * "x / maxProcessors * 2" and silently produced the wrong value. */
#define maxPacas (maxProcessors * 2)
+
/* One Paca per logical processor (see maxPacas above). */
extern struct Paca xPaca[];
/* Each processor keeps the address of its own Paca in SPRG3. */
#define get_paca() ((struct Paca *)mfspr(SPRG3))
+
+/*============================================================================
+ * Name_______: Paca
+ *
+ * Description:
+ *
+ * Defines the layout of the Paca.
+ *
+ * This structure is not directly accessed by PLIC or the SP except
+ * for the first two pointers that point to the ItLpPaca area and the
+ * ItLpRegSave area for this processor. Both the ItLpPaca and
+ * ItLpRegSave objects are currently contained within the
+ * PACA but they do not need to be.
+ *
+ *============================================================================
+ */
+struct Paca
+{
+/*=====================================================================================
+ * CACHE_LINE_1 0x0000 - 0x007F
+ *=====================================================================================
+ */
+ struct ItLpPaca *xLpPacaPtr; /* Pointer to LpPaca for PLIC 0x00 */
+ struct ItLpRegSave *xLpRegSavePtr; /* Pointer to LpRegSave for PLIC 0x08 */
+ u64 xCurrent; /* Pointer to current 0x10 */
+ u16 xPacaIndex; /* Logical processor number 0x18 */
+ u16 xHwProcNum; /* Actual Hardware Processor Number 0x1a */
+ u32 default_decr; /* Default decrementer value 0x1c */
+ u64 xHrdIntStack; /* Stack for hardware interrupts 0x20 */
+ u64 xKsave; /* Saved Kernel stack addr or zero 0x28 */
+ u64 pvr; /* Processor version register 0x30 */
+ u8 *exception_sp; /* 0x38 */
+
+ struct ItLpQueue *lpQueuePtr; /* LpQueue handled by this processor 0x40 */
+ u64 xTOC; /* Kernel TOC address 0x48 */
+ STAB xStab_data; /* Segment table information 0x50,0x58,0x60 */
+ u8 xSegments[STAB_CACHE_SIZE]; /* Cache of used stab entries 0x68,0x70 */
+ u8 xProcEnabled; /* 1=soft enabled 0x78 */
+ u8 xHrdIntCount; /* Count of active hardware interrupts 0x79 */
+ u8 prof_enabled; /* 1=iSeries profiling enabled 0x7A */
+ u8 resv1[5]; /* 0x7B-0x7F */
+
+/*=====================================================================================
+ * CACHE_LINE_2 0x0080 - 0x00FF
+ *=====================================================================================
+ */
+ u64 *pgd_cache; /* 0x00 */
+ u64 *pmd_cache; /* 0x08 */
+ u64 *pte_cache; /* 0x10 */
+ u64 pgtable_cache_sz; /* 0x18 */
+ u64 next_jiffy_update_tb; /* TB value for next jiffy update 0x20 */
+ u32 lpEvent_count; /* lpEvents processed 0x28 */
+ u32 prof_multiplier; /* 0x2C */
+ u32 prof_counter; /* 0x30 */
+ u32 prof_shift; /* iSeries shift for profile bucket size0x34 */
+ u32 *prof_buffer; /* iSeries profiling buffer 0x38 */
+ u32 *prof_stext; /* iSeries start of kernel text 0x40 */
+ u32 prof_len; /* iSeries length of profile buffer -1 0x48 */
+ u8 rsvd2[128-76]; /* 0x4C */
+
+/*=====================================================================================
+ * CACHE_LINE_3 0x0100 - 0x017F
+ *=====================================================================================
+ */
+ u8 xProcStart; /* At startup, processor spins until 0x100 */
+ /* xProcStart becomes non-zero. */
+ u8 rsvd3[127];
+
+/*=====================================================================================
+ * CACHE_LINE_4-8 0x0180 - 0x03FF Contains ItLpPaca
+ *=====================================================================================
+ */
+ struct ItLpPaca xLpPaca; /* Space for ItLpPaca */
+
+/*=====================================================================================
+ * CACHE_LINE_9-16 0x0400 - 0x07FF Contains ItLpRegSave
+ *=====================================================================================
+ */
+ struct ItLpRegSave xRegSav; /* Register save for proc */
+
+/*=====================================================================================
+ * CACHE_LINE_17-18 0x0800 - 0x0EFF Reserved
+ *=====================================================================================
+ */
+ struct rtas_args xRtas; /* Per processor RTAS struct */
+ u64 xR1; /* r1 save for RTAS calls */
+ u64 xSavedMsr; /* Old msr saved here by HvCall */
+ u8 rsvd5[256-16-sizeof(struct rtas_args)];
+
+/*=====================================================================================
+ * CACHE_LINE_19-30 0x0900 - 0x0EFF Reserved
+ *=====================================================================================
+ */
+ u8 rsvd6[0x600];
+
+/*=====================================================================================
+ * CACHE_LINE_31 0x0F00 - 0x0F7F Exception stack
+ *=====================================================================================
+ */
+ u8 exception_stack[N_EXC_STACK*EXC_FRAME_SIZE];
+
+/*=====================================================================================
+ * CACHE_LINE_32 0x0F80 - 0x0FFF Reserved
+ *=====================================================================================
+ */
+ u8 rsvd7[0x80]; /* Give the stack some rope ... */
+
+/*=====================================================================================
+ * Page 2 Reserved for guard page. Also used as a stack early in SMP boots before
+ * relocation is enabled.
+ *=====================================================================================
+ */
+ u8 guard[0x1000]; /* ... and then hang 'em */
+};
+
+#endif /* _PPC64_PACA_H */
--- /dev/null
+#ifndef __PPC64_A_OUT_H__
+#define __PPC64_A_OUT_H__
+
+#include <asm/ppcdebug.h>
+
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Classic a.out executable header.  Field order and widths are part of
+ * the on-disk format and of the N_TRSIZE()/N_DRSIZE()/N_SYMSIZE() macro
+ * contract below -- do not reorder or resize fields.
+ */
+struct exec
+{
+	unsigned long a_info;	/* Use macros N_MAGIC, etc for access */
+	unsigned a_text;	/* length of text, in bytes */
+	unsigned a_data;	/* length of data, in bytes */
+	unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
+	unsigned a_syms;	/* length of symbol table data in file, in bytes */
+	unsigned a_entry;	/* start address */
+	unsigned a_trsize;	/* length of relocation info for text, in bytes */
+	unsigned a_drsize;	/* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
+#ifdef __KERNEL__
+
+/* 64-bit user address space is less PGDIR_SIZE due to pgd_index() bug. */
+#define STACK_TOP_USER64 (TASK_SIZE_USER64 - PGDIR_SIZE)
+
+/* Give 32-bit user space a full 4G address space to live in. */
+#define STACK_TOP_USER32 (TASK_SIZE_USER32)
+
+#define STACK_TOP ((test_thread_flag(TIF_32BIT) || \
+ (ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
+ STACK_TOP_USER32 : STACK_TOP_USER64)
+
+#endif /* __KERNEL__ */
+
+#endif /* __PPC64_A_OUT_H__ */
--- /dev/null
+#ifndef _ABS_ADDR_H
+#define _ABS_ADDR_H
+
+#include <linux/config.h>
+
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+
+/* Entry in the physical-chunk -> absolute-chunk table (see abs_chunk()). */
+typedef u32 msChunks_entry;
+/* Describes how physical memory is carved into fixed-size chunks. */
+struct msChunks {
+	unsigned long num_chunks;	/* number of entries in 'abs' */
+	unsigned long chunk_size;	/* bytes per chunk */
+	unsigned long chunk_shift;	/* log2(chunk_size); used by chunk_to_addr() */
+	unsigned long chunk_mask;	/* chunk_size - 1: offset-within-chunk mask */
+	msChunks_entry *abs;		/* abs[pchunk] = absolute chunk number */
+};
+
+extern struct msChunks msChunks;
+
+extern unsigned long msChunks_alloc(unsigned long, unsigned long, unsigned long);
+extern unsigned long reloc_offset(void);
+
+#ifdef CONFIG_MSCHUNKS
+
+/* Convert a chunk number to the byte address of the chunk's start. */
+static inline unsigned long
+chunk_to_addr(unsigned long chunk)
+{
+	/* 'offset' looks unused, but PTRRELOC() presumably expands to use it
+	 * (early-boot relocation) -- do not remove.  TODO confirm. */
+	unsigned long offset = reloc_offset();
+	struct msChunks *_msChunks = PTRRELOC(&msChunks);
+
+	return chunk << _msChunks->chunk_shift;
+}
+
+/* Convert a byte address to the number of the chunk containing it. */
+static inline unsigned long
+addr_to_chunk(unsigned long addr)
+{
+	/* 'offset' is presumably consumed by PTRRELOC() -- TODO confirm. */
+	unsigned long offset = reloc_offset();
+	struct msChunks *_msChunks = PTRRELOC(&msChunks);
+
+	return addr >> _msChunks->chunk_shift;
+}
+
+/* Byte offset of 'addr' within its chunk (addr & (chunk_size - 1)). */
+static inline unsigned long
+chunk_offset(unsigned long addr)
+{
+	/* 'offset' is presumably consumed by PTRRELOC() -- TODO confirm. */
+	unsigned long offset = reloc_offset();
+	struct msChunks *_msChunks = PTRRELOC(&msChunks);
+
+	return addr & _msChunks->chunk_mask;
+}
+
+/*
+ * Map a physical chunk number to its absolute chunk number via the
+ * msChunks.abs[] table.  Chunks beyond the table are returned unchanged
+ * (identity mapping).
+ */
+static inline unsigned long
+abs_chunk(unsigned long pchunk)
+{
+	/* 'offset' is presumably consumed by PTRRELOC() -- TODO confirm. */
+	unsigned long offset = reloc_offset();
+	struct msChunks *_msChunks = PTRRELOC(&msChunks);
+	if ( pchunk >= _msChunks->num_chunks ) {
+		return pchunk;
+	}
+	return PTRRELOC(_msChunks->abs)[pchunk];
+}
+
+
+/*
+ * Translate a physical address to an absolute address: remap the chunk
+ * number through abs_chunk(), then re-attach the offset within the chunk.
+ */
+static inline unsigned long
+phys_to_absolute(unsigned long pa)
+{
+	return chunk_to_addr(abs_chunk(addr_to_chunk(pa))) + chunk_offset(pa);
+}
+
+/*
+ * Translate a physical real page number to an absolute real page number
+ * by converting to a byte address, remapping, and converting back.
+ */
+static inline unsigned long
+physRpn_to_absRpn(unsigned long rpn)
+{
+	unsigned long pa = rpn << PAGE_SHIFT;
+	unsigned long aa = phys_to_absolute(pa);
+	return (aa >> PAGE_SHIFT);
+}
+
+/*
+ * Inverse of phys_to_absolute(); the reverse lookup is delegated to the
+ * lmb code (asm/lmb.h) rather than done through msChunks.
+ */
+static inline unsigned long
+absolute_to_phys(unsigned long aa)
+{
+	return lmb_abs_to_phys(aa);
+}
+
+#else /* !CONFIG_MSCHUNKS */
+
+#define chunk_to_addr(chunk) ((unsigned long)(chunk))
+#define addr_to_chunk(addr) (addr)
+#define chunk_offset(addr) (0)
+#define abs_chunk(pchunk) (pchunk)
+
+#define phys_to_absolute(pa) (pa)
+#define physRpn_to_absRpn(rpn) (rpn)
+#define absolute_to_phys(aa) (aa)
+
+#endif /* CONFIG_MSCHUNKS */
+
+
+/*
+ * Kernel virtual (effective) address -> absolute address.  Defined for
+ * both configs: without CONFIG_MSCHUNKS phys_to_absolute() is an identity
+ * macro, so this degenerates to __pa().
+ */
+static inline unsigned long
+virt_to_absolute(unsigned long ea)
+{
+	return phys_to_absolute(__pa(ea));
+}
+
+/* Absolute address -> kernel virtual address (inverse of virt_to_absolute). */
+static inline unsigned long
+absolute_to_virt(unsigned long aa)
+{
+	return (unsigned long)__va(absolute_to_phys(aa));
+}
+
+#endif /* _ABS_ADDR_H */
--- /dev/null
+/*
+ * PowerPC64 atomic operations
+ *
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PPC64_ATOMIC_H_
+#define _ASM_PPC64_ATOMIC_H_
+
+#include <asm/memory.h>
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+/*
+ * Atomically add 'a' to v->counter.
+ * lwarx/stwcx. retry loop: the conditional store fails (and branches back
+ * to reload) if another CPU stole the reservation.  No memory barrier --
+ * callers needing ordering should use atomic_add_return().
+ */
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%3		# atomic_add\n\
+	add	%0,%2,%0\n\
+	stwcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+/*
+ * Atomically add 'a' to v->counter and return the new value.
+ * ISYNC_ON_SMP (asm/memory.h) presumably emits an isync on SMP builds,
+ * giving acquire-style ordering after the update -- TODO confirm.
+ */
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_add_return\n\
+	add	%0,%1,%0\n\
+	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * Atomically subtract 'a' from v->counter (subf computes %0 = %0 - %2).
+ * No memory barrier; see atomic_sub_return() for the ordered variant.
+ */
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%3		# atomic_sub\n\
+	subf	%0,%2,%0\n\
+	stwcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+/*
+ * Atomically subtract 'a' from v->counter and return the new value,
+ * with ISYNC_ON_SMP ordering after the update on SMP builds.
+ */
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
+	subf	%0,%1,%0\n\
+	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/* Atomically increment v->counter by 1 (addic).  No memory barrier. */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_inc\n\
+	addic	%0,%0,1\n\
+	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+/*
+ * Atomically increment v->counter and return the new value, with
+ * ISYNC_ON_SMP ordering after the update on SMP builds.
+ */
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
+	addic	%0,%0,1\n\
+	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/* Atomically decrement v->counter by 1 (addic -1).  No memory barrier. */
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_dec\n\
+	addic	%0,%0,-1\n\
+	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+/*
+ * Atomically decrement v->counter and return the new value, with
+ * ISYNC_ON_SMP ordering after the update on SMP builds.  Basis of
+ * atomic_dec_and_test() below.
+ */
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
+	addic	%0,%0,-1\n\
+	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+/*
+ * Decrement v->counter only if the result stays >= 0: addic. sets CR0 and
+ * blt- skips the store when old-1 is negative.  Either way the returned
+ * value is old-1, so a negative return means no decrement happened.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#endif /* _ASM_PPC64_ATOMIC_H_ */
--- /dev/null
+/*
+ * PowerPC64 atomic bit operations.
+ * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
+ * Anton Blanchard
+ *
+ * Originally taken from the 32b PPC code. Modified to use 64b values for
+ * the various counters & memory references.
+ *
+ * Bitops are odd when viewed on big-endian systems. They were designed
+ * on little endian so the size of the bitset doesn't matter (low order bytes
+ * come first) as long as the bit in question is valid.
+ *
+ * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
+ * our best to stay compatible with that. The assumption is that val will
+ * be unsigned long for such tests. As such, we assume the bits are stored
+ * as an array of unsigned long (the usual case is a single unsigned long,
+ * of course). Here's an example bitset with bit numbering:
+ *
+ * |63..........0|127........64|191.......128|255.......192|
+ *
+ * This leads to a problem. If an int, short or char is passed as a bitset
+ * it will be a bad memory reference since we want to store in chunks
+ * of unsigned long (64 bits here) size.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC64_BITOPS_H
+#define _PPC64_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <asm/memory.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+/*
+ * Non-atomic read of bit 'nr' in a bitset stored as an array of 64-bit
+ * longs (word nr>>6, bit nr&63).  Returns 0 or 1.
+ */
+static __inline__ int test_bit(unsigned long nr, __const__ volatile void *addr)
+{
+	return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63)));
+}
+
+/*
+ * Atomically set bit 'nr' via an ldarx/stdcx. retry loop on the
+ * containing 64-bit word.  No memory barrier (see the smp_mb__* macros
+ * above for clear_bit's barrier convention).
+ */
+static __inline__ void set_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long old;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# set_bit\n\
+	or	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc");
+}
+
+/*
+ * Atomically clear bit 'nr' (andc masks it out) via ldarx/stdcx.
+ * Implies no barrier -- callers use smp_mb__before/after_clear_bit().
+ */
+static __inline__ void clear_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long old;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# clear_bit\n\
+	andc	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc");
+}
+
+/* Atomically toggle bit 'nr' (xor) via ldarx/stdcx.  No memory barrier. */
+static __inline__ void change_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long old;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# change_bit\n\
+	xor	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc");
+}
+
+/*
+ * Atomically set bit 'nr' and return its previous value (non-zero if it
+ * was already set).  EIEIO_ON_SMP before and ISYNC_ON_SMP after
+ * (asm/memory.h) presumably bracket the update with barriers on SMP
+ * builds, making this fully ordered -- TODO confirm the expansions.
+ */
+static __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%3		# test_and_set_bit\n\
+	or	%1,%0,%2 \n\
+	stdcx.	%1,0,%3 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+/*
+ * Atomically clear bit 'nr' and return its previous value.  Bracketed by
+ * EIEIO_ON_SMP / ISYNC_ON_SMP barriers on SMP builds.
+ */
+static __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%3		# test_and_clear_bit\n\
+	andc	%1,%0,%2\n\
+	stdcx.	%1,0,%3\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+/*
+ * Atomically toggle bit 'nr' and return its previous value.  Bracketed by
+ * EIEIO_ON_SMP / ISYNC_ON_SMP barriers on SMP builds.
+ */
+static __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%3		# test_and_change_bit\n\
+	xor	%1,%0,%2\n\
+	stdcx.	%1,0,%3\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+/*
+ * non-atomic versions
+ */
+/* Non-atomic set_bit: plain RMW, caller must serialize access to *addr. */
+static __inline__ void __set_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	*p |= mask;
+}
+
+/* Non-atomic clear_bit: plain RMW, caller must serialize access to *addr. */
+static __inline__ void __clear_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	*p &= ~mask;
+}
+
+/* Non-atomic change_bit: plain RMW, caller must serialize access to *addr. */
+static __inline__ void __change_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	*p ^= mask;
+}
+
+/* Non-atomic test_and_set_bit: returns old bit value; caller serializes. */
+static __inline__ int __test_and_set_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+/* Non-atomic test_and_clear_bit: returns old bit value; caller serializes. */
+static __inline__ int __test_and_clear_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+/* Non-atomic test_and_change_bit: returns old bit value; caller serializes. */
+static __inline__ int __test_and_change_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/*
+ * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the
+ * most significant (left-most) 1-bit in a double word.
+ */
+/*
+ * Position (63..0) of the most significant 1-bit of x: cntlzd counts
+ * leading zeros, so the answer is 63 - lz.  Returns -1 for x == 0
+ * (cntlzd of 0 is 64).
+ */
+static __inline__ int __ilog2(unsigned long x)
+{
+	int lz;
+
+	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+	return 63 - lz;
+}
+
+/*
+ * Determines the bit position of the least significant (rightmost) 0 bit
+ * in the specified double word. The returned bit position will be zero-based,
+ * starting from the right side (63 - 0).
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+	/* All 64 bits set: there is no zero bit; report 64. */
+	if ((x = ~x) == 0)
+		return 64;
+
+	/*
+	 * x has been inverted, so the least significant '1' bit of the new
+	 * x is the least significant '0' bit of the original.  (x & -x)
+	 * isolates that right-most 1-bit; __ilog2() turns the single-bit
+	 * mask into its zero-based position.
+	 */
+	return __ilog2(x & -x);
+}
+
+/* Position (0-based) of the least significant 1-bit; undefined for x == 0. */
+static __inline__ int __ffs(unsigned long x)
+{
+	return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+/* libc-style ffs: 1-based position of the lowest set bit; 0 when x == 0
+ * (via __ilog2(0) == -1, see above). */
+static __inline__ int ffs(int x)
+{
+	unsigned long i = (unsigned long)x;
+	return __ilog2(i & -i) + 1;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+extern unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset);
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
+
+extern unsigned long find_next_bit(void *addr, unsigned long size, unsigned long offset);
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
+extern unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset);
+#define find_first_zero_le_bit(addr, size) \
+ find_next_zero_le_bit((addr), (size), 0)
+
+/*
+ * Test bit 'nr' in a little-endian bitset: byte-addressed (nr>>3, nr&7),
+ * so the layout matches little-endian hosts regardless of word size.
+ * Used for the ext2_* bitops below.
+ */
+static __inline__ int test_le_bit(unsigned long nr, __const__ void * addr)
+{
+	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
+	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
+}
+
+/*
+ * non-atomic versions
+ */
+/* Non-atomic set of bit 'nr' in a little-endian (byte-addressed) bitset. */
+static __inline__ void __set_le_bit(unsigned long nr, void *addr)
+{
+	unsigned char *ADDR = (unsigned char *)addr;
+
+	ADDR += nr >> 3;
+	*ADDR |= 1 << (nr & 0x07);
+}
+
+/* Non-atomic clear of bit 'nr' in a little-endian (byte-addressed) bitset. */
+static __inline__ void __clear_le_bit(unsigned long nr, void *addr)
+{
+	unsigned char *ADDR = (unsigned char *)addr;
+
+	ADDR += nr >> 3;
+	*ADDR &= ~(1 << (nr & 0x07));
+}
+
+/*
+ * Non-atomic little-endian test-and-set: returns the old bit value.
+ * Backs ext2_set_bit below; caller must serialize.
+ */
+static __inline__ int __test_and_set_le_bit(unsigned long nr, void *addr)
+{
+	int		mask, retval;
+	unsigned char	*ADDR = (unsigned char *)addr;
+
+	ADDR += nr >> 3;
+	mask = 1 << (nr & 0x07);
+	retval = (mask & *ADDR) != 0;
+	*ADDR |= mask;
+	return retval;
+}
+
+/*
+ * Non-atomic little-endian test-and-clear: returns the old bit value.
+ * Backs ext2_clear_bit below; caller must serialize.
+ */
+static __inline__ int __test_and_clear_le_bit(unsigned long nr, void *addr)
+{
+	int		mask, retval;
+	unsigned char	*ADDR = (unsigned char *)addr;
+
+	ADDR += nr >> 3;
+	mask = 1 << (nr & 0x07);
+	retval = (mask & *ADDR) != 0;
+	*ADDR &= ~mask;
+	return retval;
+}
+
+#define ext2_set_bit __test_and_set_le_bit
+#define ext2_clear_bit __test_and_clear_le_bit
+#define ext2_test_bit test_le_bit
+#define ext2_find_first_zero_bit find_first_zero_le_bit
+#define ext2_find_next_zero_bit find_next_zero_le_bit
+
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_BITOPS_H */
--- /dev/null
+/*
+ * Non-machine dependent bootinfo structure. Basic idea
+ * borrowed from the m68k.
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@ppc.kernel.org>
+ * Copyright (c) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#ifndef _PPC64_BOOTINFO_H
+#define _PPC64_BOOTINFO_H
+
+#include <asm/types.h>
+
+/* We use a u32 for the type of the fields since they're written by
+ * the bootloader which is a 32-bit process and read by the kernel
+ * which is a 64-bit process. This way they can both agree on the
+ * size of the type.
+ */
+typedef u32 bi_rec_field;
+
+struct bi_record {
+ bi_rec_field tag; /* tag ID */
+ bi_rec_field size; /* size of record (in bytes) */
+ bi_rec_field data[0]; /* data */
+};
+
+#define BI_FIRST 0x1010 /* first record - marker */
+#define BI_LAST 0x1011 /* last record - marker */
+#define BI_CMD_LINE 0x1012
+#define BI_BOOTLOADER_ID 0x1013
+#define BI_INITRD 0x1014
+#define BI_SYSMAP 0x1015
+#define BI_MACHTYPE 0x1016
+
+/*
+ * Place the first bi_record at the first page boundary at/above 'addr'.
+ * size is zeroed so a following bi_rec_alloc() starts exactly here.
+ * NOTE(review): _ALIGN/PAGE_SIZE are not provided by this header's own
+ * includes -- presumably the includer pulls in asm/page.h; confirm.
+ */
+static __inline__ struct bi_record * bi_rec_init(unsigned long addr)
+{
+	struct bi_record *bi_recs;
+	bi_recs = (struct bi_record *)_ALIGN(addr, PAGE_SIZE);
+	bi_recs->size = 0;
+	return bi_recs;
+}
+
+/*
+ * Start a new record immediately after 'rec' (whose size must already be
+ * final), sized for 'args' data words.  Caller fills in tag and data.
+ * NOTE(review): no alignment is applied to size, so records are only as
+ * aligned as the sizes written so far -- fine for the u32-based layout.
+ */
+static __inline__ struct bi_record * bi_rec_alloc(struct bi_record *rec,
+						  unsigned long args)
+{
+	rec = (struct bi_record *)((unsigned long)rec + rec->size);
+	rec->size = sizeof(struct bi_record) + args*sizeof(bi_rec_field);
+	return rec;
+}
+
+/*
+ * Like bi_rec_alloc() but the payload is given as a raw byte count
+ * rather than a number of bi_rec_field words.
+ */
+static __inline__ struct bi_record * bi_rec_alloc_bytes(struct bi_record *rec,
+							unsigned long bytes)
+{
+	rec = (struct bi_record *)((unsigned long)rec + rec->size);
+	rec->size = sizeof(struct bi_record) + bytes;
+	return rec;
+}
+
+/* Step to the record following 'rec' by advancing rec->size bytes. */
+static __inline__ struct bi_record * bi_rec_next(struct bi_record *rec)
+{
+	return (struct bi_record *)((unsigned long)rec + rec->size);
+}
+
+#endif /* _PPC64_BOOTINFO_H */
--- /dev/null
+/*
+ * This file is included by 'init/main.c' to check for architecture-dependent
+ * bugs.
+ *
+ */
+
+/*
+ * Architecture bug checks, run once at boot from init/main.c (see the
+ * header comment above).  Nothing to work around on ppc64 so far.
+ */
+static void check_bugs(void) {
+}
--- /dev/null
+#ifndef _PPC64_BYTEORDER_H
+#define _PPC64_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+
+#ifdef __GNUC__
+#ifdef __KERNEL__
+
+/* Byte-reversed halfword load (lhbrx): read a little-endian u16 on this
+ * big-endian CPU. */
+static __inline__ __u16 ld_le16(const volatile __u16 *addr)
+{
+	__u16 val;
+
+	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+	return val;
+}
+
+/* Byte-reversed halfword store (sthbrx): write 'val' as little-endian. */
+static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
+{
+	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+/* Byte-reversed word load (lwbrx): read a little-endian u32 on this
+ * big-endian CPU. */
+static __inline__ __u32 ld_le32(const volatile __u32 *addr)
+{
+	__u32 val;
+
+	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+	return val;
+}
+
+/* Byte-reversed word store (stwbrx): write 'val' as little-endian. */
+static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
+{
+	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+#if 0
+static __inline__ __const__ __u16 ___arch__swab16(__u16 value)
+{
+ __u16 result;
+
+ __asm__("rlwimi %0,%1,8,16,23"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 8));
+ return result;
+}
+
+static __inline__ __const__ __u32 ___arch__swab32(__u32 value)
+{
+ __u32 result;
+
+ __asm__("rlwimi %0,%1,24,16,23\n\t"
+ "rlwimi %0,%1,8,8,15\n\t"
+ "rlwimi %0,%1,24,0,7"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 24));
+ return result;
+}
+
+static __inline__ __const__ __u64 ___arch__swab64(__u64 value)
+{
+ __u64 result;
+#error implement me
+}
+
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab64(x) ___arch__swab64(x)
+
+#endif
+
+/* The same, but returns converted value from the location pointer by addr. */
+#define __arch__swab16p(addr) ld_le16(addr)
+#define __arch__swab32p(addr) ld_le32(addr)
+
+/* The same, but do the conversion in situ, ie. put the value back to addr. */
+#define __arch__swab16s(addr) st_le16(addr,*addr)
+#define __arch__swab32s(addr) st_le32(addr,*addr)
+
+#endif /* __KERNEL__ */
+
+#ifndef __STRICT_ANSI__
+#define __BYTEORDER_HAS_U64__
+#endif
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _PPC64_BYTEORDER_H */
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ARCH_PPC64_CACHE_H
+#define __ARCH_PPC64_CACHE_H
+
+/* bytes per L1 cache line */
+#define L1_CACHE_BYTES 128
+
+#endif
--- /dev/null
+#ifndef _PPC64_CHECKSUM_H
+#define _PPC64_CHECKSUM_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries. ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern unsigned short csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern unsigned int csum_partial(const unsigned char * buff, int len,
+ unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src to dst while it
+ * checksums
+ */
+unsigned int csum_partial_copy(const char *src, char *dst,
+ int len, unsigned int sum);
+
+extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
+ int len, unsigned int sum,
+ int *src_err, int *dst_err);
+/*
+ * the same as csum_partial, but copies from user space.
+ */
+
+unsigned int csum_partial_copy_fromuser(const char *src,
+ char *dst,
+ int len,
+ unsigned int sum,
+ int *src_err);
+
+unsigned int csum_partial_copy_nocheck(const char *src,
+ char *dst,
+ int len,
+ unsigned int sum);
+
+/*
+ * turns a 32-bit partial checksum (e.g. from csum_partial) into a
+ * 1's complement 16-bit checksum.
+ */
+/*
+ * Fold a 32-bit partial checksum into a complemented 16-bit checksum.
+ * rlwinm ...,16,0,31 is a rotate-left-by-16, i.e. a halfword swap.
+ */
+static inline unsigned int csum_fold(unsigned int sum)
+{
+	unsigned int tmp;
+
+	/* swap the two 16-bit halves of sum */
+	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
+	/* if there is a carry from adding the two 16-bit halves,
+	   it will carry from the lower half into the upper half,
+	   giving us the correct sum in the upper half. */
+	sum = ~(sum + tmp) >> 16;
+	return sum;
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+/* 1's-complement checksum of 'len' bytes at 'buff', folded to 16 bits. */
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
+ csum_partial_copy_generic((src), (dst), (len), (sum), (errp), 0)
+
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
+
+/*
+ * Unfolded TCP/UDP pseudo-header checksum: accumulate the 32-bit fields
+ * in a 64-bit long, then fold the high-word carries back in once.
+ * NOTE(review): a carry produced by that final fold-add is dropped by the
+ * u32 truncation; presumably csum_fold() downstream absorbs it -- confirm.
+ */
+static inline u32 csum_tcpudp_nofold(u32 saddr,
+				     u32 daddr,
+				     unsigned short len,
+				     unsigned short proto,
+				     unsigned int sum)
+{
+	unsigned long s = sum;
+
+	s += saddr;
+	s += daddr;
+	s += (proto << 16) + len;
+	s += (s >> 32);
+	return (u32) s;
+}
+
+#endif
--- /dev/null
+#ifndef _PPC64_CURRENT_H
+#define _PPC64_CURRENT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Use r13 for current since the ppc64 ABI reserves it - Anton
+ */
+
+register struct task_struct *current asm ("r13");
+
+#endif /* !(_PPC64_CURRENT_H) */
--- /dev/null
+#ifndef _PPC64_DELAY_H
+#define _PPC64_DELAY_H
+
+/*
+ * Copyright 1996, Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
+ * Anton Blanchard.
+ */
+
+extern unsigned long tb_ticks_per_usec;
+
+/* define these here to prevent circular dependencies */
+#define __HMT_low() asm volatile("or 1,1,1")
+#define __HMT_medium() asm volatile("or 2,2,2")
+
+/* Read the 64-bit timebase register (mftb). */
+static inline unsigned long __get_tb(void)
+{
+	unsigned long rval;
+
+	asm volatile("mftb %0" : "=r" (rval));
+	return rval;
+}
+
+/*
+ * Busy-wait for 'loops' timebase ticks.  __HMT_low() ("or 1,1,1") lowers
+ * SMT thread priority while spinning; udelay() restores medium priority.
+ */
+static inline void __delay(unsigned long loops)
+{
+	unsigned long start = __get_tb();
+
+	while((__get_tb()-start) < loops)
+		__HMT_low();
+}
+
+/*
+ * Busy-wait for 'usecs' microseconds, converting via tb_ticks_per_usec
+ * (extern above; presumably set during time init -- confirm), then
+ * restore medium SMT priority.
+ */
+static inline void udelay(unsigned long usecs)
+{
+	unsigned long loops = tb_ticks_per_usec * usecs;
+
+	__delay(loops);
+	__HMT_medium();
+}
+
+#endif /* _PPC64_DELAY_H */
--- /dev/null
+#ifndef __PPC_DIV64
+#define __PPC_DIV64
+
+/* Copyright 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * do_div(n, base): divide the 64-bit value n by the 32-bit base in place
+ * (n becomes the quotient); the statement-expression's value is the
+ * remainder.  NOTE(review): the remainder is held in a signed int, so a
+ * base above 2^31 could overflow it -- confirm callers use small bases.
+ */
+#define do_div(n,base) ({ \
+	int __res; \
+	__res = ((unsigned long) (n)) % (unsigned) (base); \
+	(n) = ((unsigned long) (n)) / (unsigned) (base); \
+	__res; })
+
+#endif
--- /dev/null
+/*
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * Changes for ppc sound by Christoph Nadig
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <linux/config.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+
+#ifndef MAX_DMA_CHANNELS
+#define MAX_DMA_CHANNELS 8
+#endif
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS (~0UL)
+
+#define dma_outb outb
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_LO_PAGE_0 0x87 /* DMA page registers */
+#define DMA_LO_PAGE_1 0x83
+#define DMA_LO_PAGE_2 0x81
+#define DMA_LO_PAGE_3 0x82
+#define DMA_LO_PAGE_5 0x8B
+#define DMA_LO_PAGE_6 0x89
+#define DMA_LO_PAGE_7 0x8A
+
+#define DMA_HI_PAGE_0 0x487 /* DMA page registers */
+#define DMA_HI_PAGE_1 0x483
+#define DMA_HI_PAGE_2 0x481
+#define DMA_HI_PAGE_3 0x482
+#define DMA_HI_PAGE_5 0x48B
+#define DMA_HI_PAGE_6 0x489
+#define DMA_HI_PAGE_7 0x48A
+
+#define DMA1_EXT_REG 0x40B
+#define DMA2_EXT_REG 0x4D6
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+extern spinlock_t dma_spin_lock;
+
+/* Serialize access to the shared 8237 DMA controller registers.
+ * claim_dma_lock() takes the global DMA spinlock with local
+ * interrupts disabled and returns the saved flag word; hand that
+ * word back to release_dma_lock() when the register sequence is
+ * complete.  Required because the LSB/MSB flip-flop makes register
+ * access a multi-step, non-reentrant protocol (see comment above
+ * clear_dma_ff()).
+ */
+static __inline__ unsigned long claim_dma_lock(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dma_spin_lock, flags);
+	return flags;
+}
+
+/* Drop the DMA spinlock and restore the interrupt state saved by
+ * claim_dma_lock().
+ */
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+/* Unmask (enable) channel dmanr.  For any channel other than the
+ * cascade channel (4) the master controller's group is enabled first,
+ * since channels 0-3 reach the bus through the cascade.  Channels 0-3
+ * are then unmasked on controller 1, channels 5-7 on controller 2.
+ * dma_outb() is the port-write primitive defined earlier in this
+ * header family (not visible here).
+ */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+	unsigned char ucDmaCmd=0x00;	/* command reg 0 = normal operation, controller enabled */
+
+	if (dmanr != 4)
+	{
+		dma_outb(0, DMA2_MASK_REG);	/* This may not be enabled */
+		dma_outb(ucDmaCmd, DMA2_CMD_REG);	/* Enable group */
+	}
+	if (dmanr<=3)
+	{
+		dma_outb(dmanr, DMA1_MASK_REG);
+		dma_outb(ucDmaCmd, DMA1_CMD_REG);	/* Enable group */
+	} else
+	{
+		dma_outb(dmanr & 3, DMA2_MASK_REG);
+	}
+}
+
+/* Mask (disable) channel dmanr.  Bit 2 of the single-channel mask
+ * register means "set mask"; bits 0-1 select the channel within its
+ * controller.
+ */
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(dmanr | 4, DMA1_MASK_REG);
+	else
+		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+/* The flip-flop selects which half of a 16-bit address/count register
+ * the next 8-bit port access hits; any value written to the clear-FF
+ * port resets it to "LSB next".
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(0, DMA1_CLEAR_FF_REG);
+	else
+		dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+/* 'mode' is one of DMA_MODE_READ/WRITE/CASCADE (optionally or'd with
+ * DMA_AUTOINIT); the low two bits of the channel number are folded in
+ * to select the channel within its controller.
+ */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+	if (dmanr<=3)
+		dma_outb(mode | dmanr, DMA1_MODE_REG);
+	else
+		dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+/* The low page byte goes to the ISA page register, the high byte to
+ * the extended "HI" page register (ports 0x48x) for >16MB addressing.
+ * For word channels 5-7 bit 0 of the page is masked off -- it is
+ * unused by the hardware, which addresses 128K pages on those
+ * channels (see the layout comment at the top of this header).
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
+{
+	switch(dmanr) {
+	case 0:
+		dma_outb(pagenr, DMA_LO_PAGE_0);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_0);
+		break;
+	case 1:
+		dma_outb(pagenr, DMA_LO_PAGE_1);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_1);
+		break;
+	case 2:
+		dma_outb(pagenr, DMA_LO_PAGE_2);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_2);
+		break;
+	case 3:
+		dma_outb(pagenr, DMA_LO_PAGE_3);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_3);
+		break;
+	case 5:
+		dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_5);
+		break;
+	case 6:
+		dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_6);
+		break;
+	case 7:
+		dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
+		dma_outb(pagenr>>8, DMA_HI_PAGE_7);
+		break;
+	}
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+/* Two successive byte writes to the same port load LSB then MSB via
+ * the flip-flop.  Word channels (5-7) latch a word address, hence the
+ * extra >>1 / >>9 shifts; byte channels take the physical address
+ * directly.  Bits 16+ go to the page registers.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
+{
+	if (dmanr <= 3) {
+		dma_outb( phys & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+		dma_outb( (phys>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+	} else {
+		dma_outb( (phys>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+		dma_outb( (phys>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+	}
+	set_dma_page(dmanr, phys>>16);
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	count--;	/* hardware register holds (transfers - 1) */
+	if (dmanr <= 3) {
+		dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+		dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+	} else {
+		/* word channels: byte count / 2 = word count */
+		dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+		dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+	}
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+	/* using short to get 16-bit wrap around */
+	unsigned short count;
+
+	/* register holds (count - 1): read LSB then MSB via the flip-flop
+	 * and add 1 back; a completed transfer reads 0xFFFF and wraps to 0.
+	 */
+	count = 1 + dma_inb(io_port);
+	count += dma_inb(io_port) << 8;
+
+	/* word channels (5-7) count words, so double to get bytes */
+	return (dmanr <= 3)? count : (count<<1);
+}
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+#endif /* _ASM_DMA_H */
--- /dev/null
+/*
+ * eeh.h
+ * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Start Change Log
+ * 2001/10/27 : engebret : Created.
+ * End Change Log
+ */
+
+#ifndef _EEH_H
+#define _EEH_H
+
+struct pci_dev;
+
+#define IO_UNMAPPED_REGION_ID 0xaUL
+
+#define IO_TOKEN_TO_ADDR(token) ((((unsigned long)(token)) & 0xFFFFFFFF) | (0xEUL << 60))
+/* Flag bits encoded in the 3 unused function bits of devfn */
+#define EEH_TOKEN_DISABLED (1UL << 34UL) /* eeh is disabled for this token */
+#define IS_EEH_TOKEN_DISABLED(token) ((unsigned long)(token) & EEH_TOKEN_DISABLED)
+
+#define EEH_STATE_OVERRIDE 1 /* IOA does not require eeh traps */
+#define EEH_STATE_FAILURE 16 /* */
+
+/* This is for profiling only and should be removed */
+extern unsigned long eeh_total_mmio_reads;
+extern unsigned long eeh_total_mmio_ffs;
+
+void eeh_init(void);
+int eeh_get_state(unsigned long ea);
+unsigned long eeh_check_failure(void *token, unsigned long val);
+
+#define EEH_DISABLE 0
+#define EEH_ENABLE 1
+#define EEH_RELEASE_LOADSTORE 2
+#define EEH_RELEASE_DMA 3
+int eeh_set_option(struct pci_dev *dev, int options);
+
+/* Given a PCI device check if eeh should be configured or not.
+ * This may look at firmware properties and/or kernel cmdline options.
+ */
+int is_eeh_configured(struct pci_dev *dev);
+
+/* Generate an EEH token.
+ * The high nibble of the offset is cleared; no other bounds checking is
+ * performed.
+ * Use IO_TOKEN_TO_ADDR(token) to translate this token back to a mapped virtual addr.
+ * Do NOT do this to perform IO -- use the read/write macros!
+ */
+unsigned long eeh_token(unsigned long phb,
+ unsigned long bus,
+ unsigned long devfn,
+ unsigned long offset);
+
+extern void *memcpy(void *, const void *, unsigned long);
+extern void *memset(void *,int, unsigned long);
+
+/* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
+ *
+ * Order this macro for performance.
+ * If EEH is off for a device and it is a memory BAR, ioremap will
+ * map it to the IOREGION. In this case addr == vaddr and since these
+ * should be in registers we compare them first. Next we check for
+ * all ones which is perhaps fastest as ~val. Finally we weed out
+ * EEH disabled IO BARs.
+ *
+ * If this macro yields TRUE, the caller relays to eeh_check_failure()
+ * which does further tests out of line.
+ */
+/* #define EEH_POSSIBLE_ERROR(addr, vaddr, val) ((vaddr) != (addr) && ~(val) == 0 && !IS_EEH_TOKEN_DISABLED(addr)) */
+/* This version is rearranged to collect some profiling data */
+#define EEH_POSSIBLE_ERROR(addr, vaddr, val) (++eeh_total_mmio_reads, (~(val) == 0 && (++eeh_total_mmio_ffs, (vaddr) != (addr) && !IS_EEH_TOKEN_DISABLED(addr))))
+
+/*
+ * MMIO read/write operations with EEH support.
+ *
+ * addr: 64b token of the form 0xA0PPBBDDyyyyyyyy
+ *       A    : unmapped MMIO region id (IO_UNMAPPED_REGION_ID), next nibble 0
+ * PP : PHB index (starting at zero)
+ * BB : PCI Bus number under given PHB
+ * DD : PCI devfn under given bus
+ * yyyyyyyy : Virtual address offset
+ *
+ * An actual virtual address is produced from this token
+ * by masking into the form:
+ * 0xE0000000yyyyyyyy
+ */
+/* EEH-aware MMIO accessors.  Each translates the EEH token to the
+ * real mapped virtual address with IO_TOKEN_TO_ADDR() and performs
+ * the access through the in_8/out_8/in_le16/... primitives (defined
+ * elsewhere; the 16/32-bit variants are little-endian per their
+ * names).  On reads, an all-ones result is funnelled through
+ * eeh_check_failure() -- see EEH_POSSIBLE_ERROR() above -- to
+ * distinguish a genuine 0xff.. value from an error-isolated slot.
+ * Writes need no such check.
+ */
+static inline u8 eeh_readb(void *addr) {
+	volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
+	u8 val = in_8(vaddr);
+	if (EEH_POSSIBLE_ERROR(addr, vaddr, val))
+		return eeh_check_failure(addr, val);
+	return val;
+}
+static inline void eeh_writeb(u8 val, void *addr) {
+	volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
+	out_8(vaddr, val);
+}
+static inline u16 eeh_readw(void *addr) {
+	volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
+	u16 val = in_le16(vaddr);
+	if (EEH_POSSIBLE_ERROR(addr, vaddr, val))
+		return eeh_check_failure(addr, val);
+	return val;
+}
+static inline void eeh_writew(u16 val, void *addr) {
+	volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
+	out_le16(vaddr, val);
+}
+static inline u32 eeh_readl(void *addr) {
+	volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
+	u32 val = in_le32(vaddr);
+	if (EEH_POSSIBLE_ERROR(addr, vaddr, val))
+		return eeh_check_failure(addr, val);
+	return val;
+}
+static inline void eeh_writel(u32 val, void *addr) {
+	volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
+	out_le32(vaddr, val);
+}
+
+/* Bulk MMIO helpers: translate the token, then use plain
+ * memset/memcpy.  NOTE(review): unlike the scalar readers above,
+ * these perform no EEH failure check on the data read -- the
+ * "look for ffff's" comment below records that as an open TODO.
+ */
+static inline void eeh_memset_io(void *addr, int c, unsigned long n) {
+	void *vaddr = (void *)IO_TOKEN_TO_ADDR(addr);
+	memset(vaddr, c, n);
+}
+static inline void eeh_memcpy_fromio(void *dest, void *src, unsigned long n) {
+	void *vsrc = (void *)IO_TOKEN_TO_ADDR(src);
+	memcpy(dest, vsrc, n);
+	/* look for ffff's here at dest[n] */
+}
+static inline void eeh_memcpy_toio(void *dest, void *src, unsigned long n) {
+	void *vdest = (void *)IO_TOKEN_TO_ADDR(dest);
+	memcpy(vdest, src, n);
+}
+
+#endif /* _EEH_H */
--- /dev/null
+#ifndef __PPC64_ELF_H
+#define __PPC64_ELF_H
+
+/*
+ * ELF register definitions..
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ptrace.h>
+
+#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
+#define ELF_NFPREG 33 /* includes fpscr */
+#define ELF_NVRREG 33 /* includes vscr */
+
+typedef unsigned long elf_greg_t64;
+typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
+
+typedef unsigned int elf_greg_t32;
+typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#ifndef ELF_ARCH
+# define ELF_ARCH EM_PPC64
+# define ELF_CLASS ELFCLASS64
+# define ELF_DATA ELFDATA2MSB
+ typedef elf_greg_t64 elf_greg_t;
+ typedef elf_gregset_t64 elf_gregset_t;
+# define elf_addr_t unsigned long
+# define elf_caddr_t char *
+#else
+ /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
+ typedef elf_greg_t32 elf_greg_t;
+ typedef elf_gregset_t32 elf_gregset_t;
+# define elf_addr_t u32
+# define elf_caddr_t u32
+#endif
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (0x08000000)
+
+/* Common routine for both 32-bit and 64-bit processes */
+/* NOTE(review): the macro expansion ends with ';', so
+ * ELF_CORE_COPY_REGS must only be used as a full statement.
+ */
+#define ELF_CORE_COPY_REGS(gregs, regs) elf_core_copy_regs(gregs, regs);
+/* Copy the saved register frame into an ELF core-dump gregset.
+ * pt_regs is reinterpreted as an array of 64-bit doublewords; at most
+ * ELF_NGREG entries are copied, and each value is narrowed through
+ * the cast to elf_greg_t -- truncating to 32 bits when this header is
+ * compiled for the 32-bit ABI (ELF_ARCH pre-defined, see above).
+ */
+static inline void
+elf_core_copy_regs(elf_gregset_t dstRegs, struct pt_regs* srcRegs)
+{
+	int i;
+
+	int numGPRS = ((sizeof(struct pt_regs)/sizeof(elf_greg_t64)) < ELF_NGREG) ? (sizeof(struct pt_regs)/sizeof(elf_greg_t64)) : ELF_NGREG;
+
+	for (i=0; i < numGPRS; i++)
+		dstRegs[i] = (elf_greg_t)((elf_greg_t64 *)srcRegs)[i];
+}
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization. This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment no platform string is provided (ELF_PLATFORM is
+   NULL below), but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) \
+do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ set_thread_flag(TIF_32BIT); \
+ else \
+ clear_thread_flag(TIF_32BIT); \
+ if (ibcs2) \
+ set_personality(PER_SVR4); \
+ else if (current->personality != PER_LINUX32) \
+ set_personality(PER_LINUX); \
+} while (0)
+#endif
+
+/*
+ * We need to put in some extra aux table entries to tell glibc what
+ * the cache block size is, so it can use the dcbz instruction safely.
+ */
+#define AT_DCACHEBSIZE 19
+#define AT_ICACHEBSIZE 20
+#define AT_UCACHEBSIZE 21
+/* A special ignored type value for PPC, for glibc compatibility. */
+#define AT_IGNOREPPC 22
+
+extern int dcache_bsize;
+extern int icache_bsize;
+extern int ucache_bsize;
+
+/*
+ * The requirements here are:
+ * - keep the final alignment of sp (sp & 0xf)
+ * - make sure the 32-bit value at the first 16 byte aligned position of
+ * AUXV is greater than 16 for glibc compatibility.
+ * AT_IGNOREPPC is used for that.
+ * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
+ * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
+ */
+#define DLINFO_ARCH_ITEMS 3
+#define ARCH_DLINFO \
+do { \
+ sp -= DLINFO_ARCH_ITEMS * 2; \
+ NEW_AUX_ENT(0, AT_DCACHEBSIZE, dcache_bsize); \
+ NEW_AUX_ENT(1, AT_ICACHEBSIZE, icache_bsize); \
+ NEW_AUX_ENT(2, AT_UCACHEBSIZE, ucache_bsize); \
+ /* \
+ * Now handle glibc compatibility. \
+ */ \
+ sp -= 2*2; \
+ NEW_AUX_ENT(0, AT_IGNOREPPC, AT_IGNOREPPC); \
+ NEW_AUX_ENT(1, AT_IGNOREPPC, AT_IGNOREPPC); \
+ } while (0)
+
+#endif /* __PPC64_ELF_H */
--- /dev/null
+#ifndef _PPC64_ERRNO_H
+#define _PPC64_ERRNO_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+#define EDEADLOCK 58 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+/* Should never be seen by user programs */
+#define ERESTARTSYS 512
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514 /* restart if no handler.. */
+#define ENOIOCTLCMD 515 /* No ioctl command */
+
+#define _LAST_ERRNO 515
+
+#endif
--- /dev/null
+#ifndef _PPC64_FCNTL_H
+#define _PPC64_FCNTL_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+#define O_DIRECTORY 040000 /* must be a directory */
+#define O_NOFOLLOW 0100000 /* don't follow links */
+#define O_LARGEFILE 0200000
+#define O_DIRECT 0400000 /* direct disk access hint - currently ignored */
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get close_on_exec */
+#define F_SETFD 2 /* set/clear close_on_exec */
+#define F_GETFL 3 /* get file->f_flags */
+#define F_SETFL 4 /* set file->f_flags */
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* for leases */
+#define F_INPROGRESS 16
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+#define LOCK_MAND 32 /* This is a mandatory flock */
+#define LOCK_READ 64 /* ... Which allows concurrent read operations */
+#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
+#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
+
+#ifdef __KERNEL__
+#define F_POSIX 1
+#define F_FLOCK 2
+#define F_BROKEN 4 /* broken flock() emulation */
+#endif /* __KERNEL__ */
+
+/* POSIX advisory record lock descriptor (fcntl F_GETLK/F_SETLK/
+ * F_SETLKW).  l_start/l_len use the native off_t.
+ */
+struct flock {
+	short l_type;
+	short l_whence;
+	off_t l_start;
+	off_t l_len;
+	pid_t l_pid;
+};
+
+/* Large-file variant used by F_GETLK64/F_SETLK64/F_SETLKW64;
+ * identical layout except the 64-bit loff_t range fields.
+ */
+struct flock64 {
+	short l_type;
+	short l_whence;
+	loff_t l_start;
+	loff_t l_len;
+	pid_t l_pid;
+};
+
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif /* _PPC64_FCNTL_H */
--- /dev/null
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_PPC64_FLOPPY_H
+#define __ASM_PPC64_FLOPPY_H
+
+#include <linux/config.h>
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(port,value) outb_p(port,value)
+
+#define fd_enable_dma() enable_dma(FLOPPY_DMA)
+#define fd_disable_dma() disable_dma(FLOPPY_DMA)
+#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma() free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_cacheflush(addr,size) /* nothing */
+#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \
+ SA_INTERRUPT|SA_SAMPLE_RANDOM, \
+ "floppy", NULL)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+
+#define fd_dma_setup(addr,size,mode,io) ppc64_fd_dma_setup(addr,size,mode,io)
+
+extern struct pci_dev *ppc64_floppy_dev;
+
+/* Program an ISA DMA transfer for the floppy controller.
+ *
+ * addr/size: kernel buffer and length; mode: DMA_MODE_READ/WRITE;
+ * io: FDC port, stashed in virtual_dma_port (declared elsewhere).
+ * Always returns 0.
+ *
+ * The pci_map_single() bus mapping is cached in function-static
+ * variables and reused while (addr, size, direction) are unchanged;
+ * a differing request first unmaps the previous buffer.  The statics
+ * are safe here only because the floppy driver serializes its own
+ * DMA setup -- this is not a reentrant routine.
+ */
+static __inline__ int
+ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+	static unsigned long prev_size;
+	static dma_addr_t bus_addr = 0;
+	static char *prev_addr;
+	static int prev_dir;
+	int dir;
+
+	dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+
+	if (bus_addr
+	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
+		/* different from last time -- unmap prev */
+		pci_unmap_single(ppc64_floppy_dev, bus_addr, prev_size, prev_dir);
+		bus_addr = 0;
+	}
+
+	if (!bus_addr)	/* need to map it */ {
+		bus_addr = pci_map_single(ppc64_floppy_dev, addr, size, dir);
+	}
+
+	/* remember this one as prev */
+	prev_addr = addr;
+	prev_size = size;
+	prev_dir = dir;
+
+	/* standard 8237 programming sequence: clear flip-flop, set mode,
+	 * load bus address and count, then unmask the channel */
+	fd_clear_dma_ff();
+	fd_cacheflush(addr, size);
+	fd_set_dma_mode(mode);
+	set_dma_addr(FLOPPY_DMA, bus_addr);
+	fd_set_dma_count(size);
+	virtual_dma_port = io;
+	fd_enable_dma();
+
+	return 0;
+}
+
+#endif /* CONFIG_PCI */
+
+/* No-op: the generic floppy code calls this for virtual-DMA setups;
+ * PPC64 uses real (PCI-mapped) DMA so there is nothing to do.
+ * NOTE(review): declared __inline__ without static in a header --
+ * relies on the compiler never emitting an out-of-line copy; other
+ * helpers in this file are static __inline__.  Confirm intentional.
+ */
+__inline__ void virtual_dma_init(void)
+{
+	/* Nothing to do on PowerPC */
+}
+
+/* Default FDC base ports: only the primary controller at 0x3f0 is
+ * assumed present (-1 = no second controller).  static in a header,
+ * so each includer gets its own (mutable) copy -- the pattern the
+ * generic floppy driver expects.
+ */
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Again, the CMOS information not available
+ */
+#define FLOPPY0_TYPE 6
+#define FLOPPY1_TYPE 0
+
+#define N_FDC 2 /* Don't change this! */
+#define N_DRIVE 8
+
+#define FLOPPY_MOTOR_MASK 0xf0
+
+/*
+ * The PowerPC has no problems with floppy DMA crossing 64k borders.
+ */
+#define CROSS_64KB(a,s) (0)
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_PPC64_FLOPPY_H */
--- /dev/null
+#ifdef __KERNEL__
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+/*
+ * Use a brlock for the global irq lock, based on sparc64.
+ * Anton Blanchard <anton@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/brlock.h>
+#include <linux/spinlock.h>
+
+
+/* Per-CPU interrupt bookkeeping, consumed via the standard accessor
+ * macros from <linux/irq_cpustat.h> included below.  On SMP the
+ * hardirq nesting count lives in the brlock array instead (see the
+ * local_irq_count redefinition below), so that slot is unused here.
+ */
+typedef struct {
+	unsigned long __softirq_pending;
+#ifndef CONFIG_SMP
+	unsigned int __local_irq_count;
+#else
+	unsigned int __unused_on_SMP;	/* We use brlocks on SMP */
+#endif
+	unsigned int __local_bh_count;
+	unsigned int __syscall_count;
+	unsigned long __unused;
+	struct task_struct * __ksoftirqd_task;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+/* Note that local_irq_count() is replaced by ppc64 specific version for SMP */
+
+#ifndef CONFIG_SMP
+#define irq_enter(cpu) (local_irq_count(cpu)++)
+#define irq_exit(cpu) (local_irq_count(cpu)--)
+#else
+#undef local_irq_count
+#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
+#define irq_enter(cpu) br_read_lock(BR_GLOBALIRQ_LOCK)
+#define irq_exit(cpu) br_read_unlock(BR_GLOBALIRQ_LOCK)
+#endif
+
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+
+/* This tests only the local processors hw IRQ context disposition. */
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
+#ifndef CONFIG_SMP
+
+#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
+#define hardirq_endlock(cpu) do { } while (0)
+
+#define synchronize_irq() barrier()
+#define release_irqlock(cpu) do { } while (0)
+
+#else /* CONFIG_SMP */
+
+/* Return 1 if any CPU is currently inside a hardware interrupt
+ * (i.e. holds a read side of the global-irq brlock -- see the
+ * local_irq_count() redefinition above), else 0.
+ */
+static __inline__ int irqs_running(void)
+{
+	int i;
+
+	for (i = 0; i < smp_num_cpus; i++)
+		if (local_irq_count(cpu_logical_map(i)))
+			return 1;
+	return 0;
+}
+
+extern unsigned char global_irq_holder;
+
+/* Release the global IRQ lock, but only if this CPU is the holder;
+ * otherwise this is a no-op. */
+static inline void release_irqlock(int cpu)
+{
+ if (global_irq_holder != (unsigned char) cpu)
+  return;
+
+ global_irq_holder = NO_PROC_ID;
+ br_write_unlock(BR_GLOBALIRQ_LOCK);
+}
+
+/* "Lockable" iff this CPU holds no hardirqs and nobody holds the
+ * global IRQ (brlock write) lock. */
+static inline int hardirq_trylock(int cpu)
+{
+ if (local_irq_count(cpu))
+  return 0;
+
+ return !spin_is_locked(&__br_write_locks[BR_GLOBALIRQ_LOCK].lock);
+}
+
+/* SMP: nothing to undo; hardirq_trylock() takes no lock on success. */
+#define hardirq_endlock(cpu) do { (void)(cpu); } while (0)
+
+/* Wait until no CPU is executing a hardirq handler (see arch code). */
+extern void synchronize_irq(void);
+
+#endif /* CONFIG_SMP */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_HARDIRQ_H */
--- /dev/null
+/*
+ * linux/include/asm-ppc64/hdreg.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This file contains the ppc64 architecture specific IDE code.
+ */
+
+#ifndef __ASMPPC64_HDREG_H
+#define __ASMPPC64_HDREG_H
+
+/* IDE I/O registers are addressed through full-width pointers here. */
+typedef unsigned long ide_ioreg_t;
+
+#endif /* __ASMPPC64_HDREG_H */
+
--- /dev/null
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ *
+ * Use inline IRQs where possible - Anton Blanchard <anton@au.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifdef __KERNEL__
+#ifndef _PPC64_HW_IRQ_H
+#define _PPC64_HW_IRQ_H
+
+#include <linux/config.h>
+#include <asm/irq.h>
+
+int timer_interrupt(struct pt_regs *);
+
+#ifdef CONFIG_PPC_ISERIES
+
+extern void __no_use_sti(void);
+extern void __no_use_cli(void);
+extern void __no_use_restore_flags(unsigned long);
+extern unsigned long __no_use_save_flags(void);
+extern void __no_use_set_lost(unsigned long);
+extern void __no_lpq_restore_flags(unsigned long);
+
+/* On iSeries, interrupt enable/disable goes through the hypervisor-
+ * aware helpers above instead of direct MSR[EE] manipulation. */
+#define __cli() __no_use_cli()
+#define __sti() __no_use_sti()
+#define __save_flags(flags) ((flags) = __no_use_save_flags())
+/* Parenthesize the macro argument so expression arguments expand safely. */
+#define __restore_flags(flags) __no_use_restore_flags((unsigned long)(flags))
+#define __save_and_cli(flags) ({__save_flags(flags);__cli();})
+
+#else
+
+/* Bare metal: the local interrupt state is MSR[EE]; save/restore the
+ * whole MSR. */
+#define __save_flags(flags) ((flags) = mfmsr())
+#define __restore_flags(flags) mtmsrd(flags)
+
+/* Disable external interrupts by clearing MSR[EE].  The trailing
+ * compiler barrier keeps memory accesses from being reordered before
+ * the disable. */
+static inline void __cli(void)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ mtmsrd(msr & ~MSR_EE);
+ __asm__ __volatile__("": : :"memory");
+}
+
+/* Enable external interrupts by setting MSR[EE].  The leading compiler
+ * barrier keeps memory accesses from leaking past the enable. */
+static inline void __sti(void)
+{
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ mtmsrd(msr | MSR_EE);
+}
+
+/* Record the current MSR in *flags, then clear MSR[EE] to disable
+ * interrupts.  Used via the __save_and_cli() macro below, which takes
+ * the address of its argument. */
+static inline void __do_save_and_cli(unsigned long *flags)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ *flags = msr;
+ mtmsrd(msr & ~MSR_EE);
+ __asm__ __volatile__("": : :"memory");
+}
+
+#define __save_and_cli(flags) __do_save_and_cli(&flags)
+
+#endif /* CONFIG_PPC_ISERIES */
+
+/* Route mask/unmask/ack through the controller's hw_interrupt_type
+ * ops, when the descriptor has a handler with that op.
+ * NOTE(review): each macro evaluates 'irq' more than once -- do not
+ * pass expressions with side effects. */
+#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
+#define unmask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->enable) irq_desc[irq].handler->enable(irq);})
+#define ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
+
+/* Should we handle this via lost interrupts and IPIs or should we don't care like
+ * we do now ? --BenH.
+ */
+struct hw_interrupt_type;
+/* Deliberately a no-op on ppc64 (see note above). */
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+
+#endif /* _PPC64_HW_IRQ_H */
+#endif /* __KERNEL__ */
--- /dev/null
+/*
+ * HvCall.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//===========================================================================
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from the OS.
+//
+//===========================================================================
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include "HvCallSc.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+#include <asm/Paca.h>
+
+//-------------------------------------------------------------------
+// Constants
+//-------------------------------------------------------------------
+#ifndef _HVCALL_H
+#define _HVCALL_H
+/*
+enum HvCall_ReturnCode
+{
+ HvCall_Good = 0,
+ HvCall_Partial = 1,
+ HvCall_NotOwned = 2,
+ HvCall_NotFreed = 3,
+ HvCall_UnspecifiedError = 4
+};
+
+enum HvCall_TypeOfSIT
+{
+ HvCall_ReduceOnly = 0,
+ HvCall_Unconditional = 1
+};
+
+enum HvCall_TypeOfYield
+{
+ HvCall_YieldTimed = 0, // Yield until specified time
+ HvCall_YieldToActive = 1, // Yield until all active procs have run
+ HvCall_YieldToProc = 2 // Yield until the specified processor has run
+};
+
+enum HvCall_InterruptMasks
+{
+ HvCall_MaskIPI = 0x00000001,
+ HvCall_MaskLpEvent = 0x00000002,
+ HvCall_MaskLpProd = 0x00000004,
+ HvCall_MaskTimeout = 0x00000008
+};
+
+enum HvCall_VaryOffChunkRc
+{
+ HvCall_VaryOffSucceeded = 0,
+ HvCall_VaryOffWithdrawn = 1,
+ HvCall_ChunkInLoadArea = 2,
+ HvCall_ChunkInHPT = 3,
+ HvCall_ChunkNotAccessible = 4,
+ HvCall_ChunkInUse = 5
+};
+*/
+
+/* Type of yield for HvCallBaseYieldProcessor (see HvCall_yieldProcessor) */
+#define HvCall_YieldTimed 0 // Yield until specified time (tb)
+#define HvCall_YieldToActive 1 // Yield until all active procs have run
+#define HvCall_YieldToProc 2 // Yield until the specified processor has run
+
+/* interrupt masks for setEnabledInterrupts (combined as a bitmask) */
+#define HvCall_MaskIPI 0x00000001
+#define HvCall_MaskLpEvent 0x00000002
+#define HvCall_MaskLpProd 0x00000004
+#define HvCall_MaskTimeout 0x00000008
+
+/* Log buffer formats (see get/setLogBufferFormat* below) */
+#define HvCall_LogBuffer_ASCII 0
+#define HvCall_LogBuffer_EBCDIC 1
+
+/* Hypervisor call numbers in the "base" set: offsets from HvCallBase
+ * (defined in HvCallSc.h).
+ * NOTE(review): the expansions are unparenthesized (HvCallBase + n);
+ * safe as call arguments, but do not embed in larger expressions. */
+#define HvCallBaseAckDeferredInts HvCallBase + 0
+#define HvCallBaseCpmPowerOff HvCallBase + 1
+#define HvCallBaseGetHwPatch HvCallBase + 2
+#define HvCallBaseReIplSpAttn HvCallBase + 3
+#define HvCallBaseSetASR HvCallBase + 4
+#define HvCallBaseSetASRAndRfi HvCallBase + 5
+#define HvCallBaseSetIMR HvCallBase + 6
+#define HvCallBaseSendIPI HvCallBase + 7
+#define HvCallBaseTerminateMachine HvCallBase + 8
+#define HvCallBaseTerminateMachineSrc HvCallBase + 9
+#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
+#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
+#define HvCallBaseSetVirtualSIT HvCallBase + 12
+#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
+#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
+#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
+#define HvCallBaseSendLpProd HvCallBase + 16
+#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
+#define HvCallBaseYieldProcessor HvCallBase + 18
+#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
+#define HvCallBaseSetVirtualDecr HvCallBase + 20
+#define HvCallBaseClearLogBuffer HvCallBase + 21
+#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
+#define HvCallBaseGetLogBufferFormat HvCallBase + 23
+#define HvCallBaseGetLogBufferLength HvCallBase + 24
+#define HvCallBaseReadLogBuffer HvCallBase + 25
+#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
+#define HvCallBaseWriteLogBuffer HvCallBase + 27
+#define HvCallBaseRouter28 HvCallBase + 28
+#define HvCallBaseRouter29 HvCallBase + 29
+#define HvCallBaseRouter30 HvCallBase + 30
+//=====================================================================================
+// Tell the hypervisor to set this partition's virtual decrementer.
+// NOTE(review): the original comment here spoke of "vary off" targets
+// and appears copy-pasted from a vary-off routine -- confirm intent.
+static inline void HvCall_setVirtualDecr(void)
+{
+ HvCall0(HvCallBaseSetVirtualDecr);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//=====================================================================
+// Yield this processor; typeOfYield is one of the HvCall_Yield* values
+// above, yieldParm its argument (e.g. a timebase value or proc index).
+static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
+{
+ HvCall2( HvCallBaseYieldProcessor, typeOfYield, yieldParm );
+}
+//=====================================================================
+// Enable the interrupt classes selected by the HvCall_Mask* bitmask.
+static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
+{
+ HvCall1(HvCallBaseSetEnabledInterrupts,enabledInterrupts);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+
+//=====================================================================
+// Clear the hypervisor log buffer for the given partition.
+static inline void HvCall_clearLogBuffer(HvLpIndex lpindex)
+{
+ HvCall1(HvCallBaseClearLogBuffer, lpindex);
+}
+
+//=====================================================================
+// Return the code page of lpindex's log buffer.
+static inline u32 HvCall_getLogBufferCodePage(HvLpIndex lpindex)
+{
+ return HvCall1(HvCallBaseGetLogBufferCodePage, lpindex);
+}
+
+//=====================================================================
+// Return the format (HvCall_LogBuffer_ASCII/EBCDIC) of lpindex's
+// log buffer.
+static inline int HvCall_getLogBufferFormat(HvLpIndex lpindex)
+{
+ return HvCall1(HvCallBaseGetLogBufferFormat, lpindex);
+}
+
+//=====================================================================
+// Return the length of lpindex's log buffer.
+static inline u32 HvCall_getLogBufferLength(HvLpIndex lpindex)
+{
+ return HvCall1(HvCallBaseGetLogBufferLength, lpindex);
+}
+
+//=====================================================================
+// Set both the format and code page of the log buffer.
+static inline void HvCall_setLogBufferFormatAndCodepage(int format, u32 codePage)
+{
+ HvCall2(HvCallBaseSetLogBufferFormatAndCodePage,format, codePage);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+
+//=====================================================================
+// Log-buffer bulk transfers; implemented out of line rather than as
+// simple HvCallN wrappers.
+int HvCall_readLogBuffer(HvLpIndex lpindex, void *buffer, u64 bufLen);
+void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
+
+//=====================================================================
+// Send an inter-processor interrupt to the processor that owns
+// targetPaca (identified by its Paca index).
+static inline void HvCall_sendIPI(struct Paca * targetPaca)
+{
+ HvCall1( HvCallBaseSendIPI, targetPaca->xPacaIndex );
+}
+
+//=====================================================================
+// Terminate the machine ("Src" presumably = system reference code --
+// confirm against hypervisor documentation).
+static inline void HvCall_terminateMachineSrc(void)
+{
+ HvCall0( HvCallBaseTerminateMachineSrc );
+}
+
+
+#endif // _HVCALL_H
+
--- /dev/null
+/*
+ * HvCallCfg.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=====================================================================================
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from the OS.
+//
+//=====================================================================================
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include "HvCallSc.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+//-------------------------------------------------------------------------------------
+// Constants
+//-------------------------------------------------------------------------------------
+#ifndef _HVCALLCFG_H
+#define _HVCALLCFG_H
+
+/* Qualifier selecting which flavour of a configuration value to fetch:
+ * presumably current, IPL-time initial, maximum or minimum. */
+enum HvCallCfg_ReqQual
+{
+ HvCallCfg_Cur = 0,
+ HvCallCfg_Init = 1,
+ HvCallCfg_Max = 2,
+ HvCallCfg_Min = 3
+};
+
+/* Hypervisor call numbers in the "config" set: offsets from HvCallCfg
+ * (defined in HvCallSc.h).
+ * NOTE(review): expansions are unparenthesized (HvCallCfg + n); safe
+ * as call arguments only. */
+#define HvCallCfgGetLps HvCallCfg + 0
+#define HvCallCfgGetActiveLpMap HvCallCfg + 1
+#define HvCallCfgGetLpVrmIndex HvCallCfg + 2
+#define HvCallCfgGetLpMinSupportedPlicVrmIndex HvCallCfg + 3
+#define HvCallCfgGetLpMinCompatablePlicVrmIndex HvCallCfg + 4
+#define HvCallCfgGetLpVrmName HvCallCfg + 5
+#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
+#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
+#define HvCallCfgGetSystemMsChunks HvCallCfg + 8
+#define HvCallCfgGetMsChunks HvCallCfg + 9
+#define HvCallCfgGetInteractivePercentage HvCallCfg + 10
+#define HvCallCfgIsBusDedicated HvCallCfg + 11
+#define HvCallCfgGetBusOwner HvCallCfg + 12
+#define HvCallCfgGetBusAllocation HvCallCfg + 13
+#define HvCallCfgGetBusUnitOwner HvCallCfg + 14
+#define HvCallCfgGetBusUnitAllocation HvCallCfg + 15
+#define HvCallCfgGetVirtualBusPool HvCallCfg + 16
+#define HvCallCfgGetBusUnitInterruptProc HvCallCfg + 17
+#define HvCallCfgGetConfiguredBusUnitsForIntProc HvCallCfg + 18
+#define HvCallCfgGetRioSanBusPool HvCallCfg + 19
+#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
+#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
+#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
+#define HvCallCfgRouter23 HvCallCfg + 23
+#define HvCallCfgRouter24 HvCallCfg + 24
+#define HvCallCfgRouter25 HvCallCfg + 25
+#define HvCallCfgRouter26 HvCallCfg + 26
+#define HvCallCfgRouter27 HvCallCfg + 27
+#define HvCallCfgGetMinRuntimeMsChunks HvCallCfg + 28
+#define HvCallCfgSetMinRuntimeMsChunks HvCallCfg + 29
+#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
+#define HvCallCfgGetLpExecutionMode HvCallCfg + 31
+#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
+
+//====================================================================
+// Query the hypervisor's LP information (HvCallCfgGetLps).
+static inline HvLpIndex HvCallCfg_getLps(void)
+{
+ return HvCall0(HvCallCfgGetLps);
+}
+//====================================================================
+// Whether the given bus is dedicated (presumably boolean result).
+static inline int HvCallCfg_isBusDedicated(u64 busIndex)
+{
+ return HvCall1(HvCallCfgIsBusDedicated, busIndex);
+}
+//====================================================================
+// Owning LP of the given bus.
+static inline HvLpIndex HvCallCfg_getBusOwner(u64 busIndex)
+{
+ return HvCall1(HvCallCfgGetBusOwner, busIndex);
+}
+//====================================================================
+// Per-LP allocation map for the given bus.
+static inline HvLpIndexMap HvCallCfg_getBusAllocation(u64 busIndex)
+{
+ return HvCall1(HvCallCfgGetBusAllocation, busIndex);
+}
+//====================================================================
+// Map of currently active LPs.
+static inline HvLpIndexMap HvCallCfg_getActiveLpMap(void)
+{
+ return HvCall0(HvCallCfgGetActiveLpMap);
+}
+//====================================================================
+// Virtual-LAN index map for lp.  This call is new in V5R1, so older
+// hypervisors return (u64)-1, which is mapped to an empty (0) result.
+static inline HvLpVirtualLanIndexMap HvCallCfg_getVirtualLanIndexMap(HvLpIndex lp)
+{
+ u64 map = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
+
+ return (map == (u64)-1) ? 0 : map;
+}
+//===================================================================
+// Total main-store chunks in the system.
+static inline u64 HvCallCfg_getSystemMsChunks(void)
+{
+ return HvCall0(HvCallCfgGetSystemMsChunks);
+}
+//===================================================================
+// Main-store chunks for lp, selected by qual (cur/init/max/min).
+static inline u64 HvCallCfg_getMsChunks(HvLpIndex lp,enum HvCallCfg_ReqQual qual)
+{
+ return HvCall2(HvCallCfgGetMsChunks, lp, qual);
+}
+//===================================================================
+// Minimum runtime main-store chunks for lp.
+// NOTE: added in v5r1, so older hypervisors return a -1 value.
+static inline u64 HvCallCfg_getMinRuntimeMsChunks(HvLpIndex lp)
+{
+ return HvCall1(HvCallCfgGetMinRuntimeMsChunks, lp);
+}
+//===================================================================
+// Set the minimum runtime main-store chunks; returns the call's result.
+static inline u64 HvCallCfg_setMinRuntimeMsChunks(u64 chunks)
+{
+ return HvCall1(HvCallCfgSetMinRuntimeMsChunks, chunks);
+}
+//===================================================================
+// Physical processors in the whole system.
+static inline u64 HvCallCfg_getSystemPhysicalProcessors(void)
+{
+ return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
+}
+//===================================================================
+// Physical processors for lp, selected by qual.
+static inline u64 HvCallCfg_getPhysicalProcessors(HvLpIndex lp,enum HvCallCfg_ReqQual qual)
+{
+ return HvCall2(HvCallCfgGetPhysicalProcessors, lp, qual);
+}
+//===================================================================
+// Configured bus units for the given interrupt processor of lp.
+static inline u64 HvCallCfg_getConfiguredBusUnitsForInterruptProc(HvLpIndex lp,
+					 u16 hvLogicalProcIndex)
+{
+ return HvCall2(HvCallCfgGetConfiguredBusUnitsForIntProc, lp, hvLogicalProcIndex);
+}
+//==================================================================
+// Shared-processor pool index for lp.
+static inline HvLpSharedPoolIndex HvCallCfg_getSharedPoolIndex(HvLpIndex lp)
+{
+ return HvCall1(HvCallCfgGetSharedPoolIndex, lp);
+}
+//==================================================================
+// Shared processor units for lp, selected by qual.
+static inline u64 HvCallCfg_getSharedProcUnits(HvLpIndex lp,enum HvCallCfg_ReqQual qual)
+{
+ return HvCall2(HvCallCfgGetSharedProcUnits, lp, qual);
+}
+//==================================================================
+// Number of processors in the given shared pool.
+// NOTE(review): the result is narrowed through a u16 local although the
+// function returns u64 -- harmless if counts fit in 16 bits, but
+// confirm whether the temporary should be u64.
+static inline u64 HvCallCfg_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
+{
+ u16 retVal = HvCall1(HvCallCfgGetNumProcsInSharedPool,sPI);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+
+}
+//==================================================================
+// Index of the LP hosting lp.
+// NOTE(review): the u64 temporary is implicitly narrowed to HvLpIndex
+// on return; confirm the call only yields values that fit.
+static inline HvLpIndex HvCallCfg_getHostingLpIndex(HvLpIndex lp)
+{
+ u64 retVal = HvCall1(HvCallCfgGetHostingLpIndex,lp);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+
+}
+
+#endif // _HVCALLCFG_H
+
--- /dev/null
+/*
+ * HvCallEvent.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//==================================================================
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from the OS.
+//
+//==================================================================
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include <asm/iSeries/HvCallSc.h>
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+#include <asm/abs_addr.h>
+
+//-------------------------------------------------------------------
+// Other Includes
+//-------------------------------------------------------------------
+
+//-------------------------------------------------------------------
+// Constants
+//-------------------------------------------------------------------
+#ifndef _HVCALLEVENT_H
+#define _HVCALLEVENT_H
+
+struct HvLpEvent;
+
+typedef u8 HvLpEvent_Type;
+typedef u8 HvLpEvent_AckInd;
+typedef u8 HvLpEvent_AckType;
+
+/* Miscellaneous signalLpEventFast parameters packed into a single
+ * dword so they can be passed to PLIC in one register. */
+struct HvCallEvent_PackedParms
+{
+ u8 xAckType:1;
+ u8 xAckInd:1;
+ u8 xRsvd:1;
+ u8 xTargetLp:5;
+ u8 xType;
+ u16 xSubtype;
+ HvLpInstanceId xSourceInstId;
+ HvLpInstanceId xTargetInstId;
+};
+
+typedef u8 HvLpDma_Direction;
+typedef u8 HvLpDma_AddressType;
+
+/* Miscellaneous LP DMA parameters packed into a single dword for PLIC. */
+struct HvCallEvent_PackedDmaParms
+{
+ u8 xDirection:1;
+ u8 xLocalAddrType:1;
+ u8 xRemoteAddrType:1;
+ u8 xRsvd1:5;
+ HvLpIndex xRemoteLp;
+ u8 xType;
+ u8 xRsvd2;
+ HvLpInstanceId xLocalInstId;
+ HvLpInstanceId xRemoteInstId;
+};
+
+/* Return codes from the event and DMA hypervisor calls. */
+typedef u64 HvLpEvent_Rc;
+typedef u64 HvLpDma_Rc;
+
+/* Hypervisor call numbers in the "event" set: offsets from HvCallEvent
+ * (defined in HvCallSc.h).
+ * NOTE(review): expansions are unparenthesized (HvCallEvent + n); safe
+ * as call arguments only. */
+#define HvCallEventAckLpEvent HvCallEvent + 0
+#define HvCallEventCancelLpEvent HvCallEvent + 1
+#define HvCallEventCloseLpEventPath HvCallEvent + 2
+#define HvCallEventDmaBufList HvCallEvent + 3
+#define HvCallEventDmaSingle HvCallEvent + 4
+#define HvCallEventDmaToSp HvCallEvent + 5
+#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
+#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
+#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
+#define HvCallEventOpenLpEventPath HvCallEvent + 9
+#define HvCallEventSetLpEventStack HvCallEvent + 10
+#define HvCallEventSignalLpEvent HvCallEvent + 11
+#define HvCallEventSignalLpEventParms HvCallEvent + 12
+#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
+#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
+#define HvCallEventRouter15 HvCallEvent + 15
+
+//======================================================================
+// Retrieve overflow LP events for the given queue.
+static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
+{
+ HvCall1(HvCallEventGetOverflowLpEvents,queueIndex);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//======================================================================
+// Select which queue inter-LP events are delivered to.
+static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
+{
+ HvCall1(HvCallEventSetInterLpQueueIndex,queueIndex);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//======================================================================
+// Register an event stack for the queue; PLIC needs the stack's
+// absolute (real) address, hence the virt_to_absolute() conversion.
+static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
+ char * eventStackAddr,
+ u32 eventStackSize)
+{
+ u64 abs_addr;
+ abs_addr = virt_to_absolute( (unsigned long) eventStackAddr );
+
+ HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, eventStackSize);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//======================================================================
+// Bind the queue's event interrupts to a logical processor.
+static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
+ u16 lpLogicalProcIndex)
+{
+ HvCall2(HvCallEventSetLpEventQueueInterruptProc,queueIndex,lpLogicalProcIndex);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//=====================================================================
+// Signal a fully-built LP event; PLIC takes its absolute (real) address.
+static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent* event)
+{
+ u64 eventReal;
+
+#ifdef DEBUG_SENDEVENT
+ printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", (unsigned long)event);
+#endif
+ eventReal = virt_to_absolute( (unsigned long) event );
+ return (HvLpEvent_Rc)HvCall1(HvCallEventSignalLpEvent, eventReal);
+}
+//=====================================================================
+// Signal an LP event without building a struct HvLpEvent in memory:
+// the descriptor fields are packed into one dword (see
+// HvCallEvent_PackedParms) and passed to PLIC with the payload in
+// registers via a 7-argument hypervisor call.
+static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
+ HvLpEvent_Type type,
+ u16 subtype,
+ HvLpEvent_AckInd ackInd,
+ HvLpEvent_AckType ackType,
+ HvLpInstanceId sourceInstanceId,
+ HvLpInstanceId targetInstanceId,
+ u64 correlationToken,
+ u64 eventData1,
+ u64 eventData2,
+ u64 eventData3,
+ u64 eventData4,
+ u64 eventData5)
+{
+ HvLpEvent_Rc retVal;
+
+ // Pack the misc bits into a single Dword to pass to PLIC
+ union
+ {
+  struct HvCallEvent_PackedParms parms;
+  u64 dword;
+ } packed;
+ packed.parms.xAckType = ackType;
+ packed.parms.xAckInd = ackInd;
+ packed.parms.xRsvd = 0;
+ packed.parms.xTargetLp = targetLp;
+ packed.parms.xType = type;
+ packed.parms.xSubtype = subtype;
+ packed.parms.xSourceInstId = sourceInstanceId;
+ packed.parms.xTargetInstId = targetInstanceId;
+
+ retVal = (HvLpEvent_Rc)HvCall7(HvCallEventSignalLpEventParms,
+   packed.dword,
+   correlationToken,
+   eventData1,eventData2,
+   eventData3,eventData4,
+   eventData5);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//====================================================================
+// Acknowledge an LP event; PLIC takes its absolute (real) address.
+static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent* event)
+{
+ u64 eventReal = virt_to_absolute( (unsigned long) event );
+
+ return (HvLpEvent_Rc)HvCall1(HvCallEventAckLpEvent, eventReal);
+}
+//====================================================================
+// Cancel a previously signalled LP event.
+static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent* event)
+{
+ u64 eventReal = virt_to_absolute( (unsigned long) event );
+
+ return (HvLpEvent_Rc)HvCall1(HvCallEventCancelLpEvent, eventReal);
+}
+//===================================================================
+// Source-side instance id of the event path (targetLp, type).
+static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(HvLpIndex targetLp, HvLpEvent_Type type)
+{
+ return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
+}
+//===================================================================
+// Target-side instance id of the event path (targetLp, type).
+static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(HvLpIndex targetLp, HvLpEvent_Type type)
+{
+ return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
+}
+//===================================================================
+// Open the LP event path to targetLp for events of the given type.
+static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
+ HvLpEvent_Type type)
+{
+ HvCall2(HvCallEventOpenLpEventPath,targetLp,type);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//===================================================================
+// Close the LP event path to targetLp for events of the given type.
+static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
+ HvLpEvent_Type type)
+{
+ HvCall2(HvCallEventCloseLpEventPath,targetLp,type);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//===================================================================
+// Scatter/gather DMA between this LP and remoteLp, driven by local and
+// remote buffer lists.  The misc descriptor fields are packed into one
+// dword (HvCallEvent_PackedDmaParms) for the call.
+static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
+ HvLpIndex remoteLp,
+ HvLpDma_Direction direction,
+ HvLpInstanceId localInstanceId,
+ HvLpInstanceId remoteInstanceId,
+ HvLpDma_AddressType localAddressType,
+ HvLpDma_AddressType remoteAddressType,
+ // Do these need to be converted to
+ // absolute addresses?
+ u64 localBufList,
+ u64 remoteBufList,
+
+ u32 transferLength)
+{
+ HvLpDma_Rc retVal;
+ // Pack the misc bits into a single Dword to pass to PLIC
+ union
+ {
+  struct HvCallEvent_PackedDmaParms parms;
+  u64 dword;
+ } packed;
+ packed.parms.xDirection = direction;
+ packed.parms.xLocalAddrType = localAddressType;
+ packed.parms.xRemoteAddrType = remoteAddressType;
+ packed.parms.xRsvd1 = 0;
+ packed.parms.xRemoteLp = remoteLp;
+ packed.parms.xType = type;
+ packed.parms.xRsvd2 = 0;
+ packed.parms.xLocalInstId = localInstanceId;
+ packed.parms.xRemoteInstId = remoteInstanceId;
+
+ retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaBufList,
+   packed.dword,
+   localBufList,
+   remoteBufList,
+   transferLength);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//=================================================================
+// Single-extent DMA between this LP and remoteLp.  Same packed-parms
+// scheme as dmaBufList, but with one local and one remote address
+// (or TCE) instead of buffer lists.
+static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
+ HvLpIndex remoteLp,
+ HvLpDma_Direction direction,
+ HvLpInstanceId localInstanceId,
+ HvLpInstanceId remoteInstanceId,
+ HvLpDma_AddressType localAddressType,
+ HvLpDma_AddressType remoteAddressType,
+ u64 localAddrOrTce,
+ u64 remoteAddrOrTce,
+ u32 transferLength)
+{
+ HvLpDma_Rc retVal;
+ // Pack the misc bits into a single Dword to pass to PLIC
+ union
+ {
+  struct HvCallEvent_PackedDmaParms parms;
+  u64 dword;
+ } packed;
+ packed.parms.xDirection = direction;
+ packed.parms.xLocalAddrType = localAddressType;
+ packed.parms.xRemoteAddrType = remoteAddressType;
+ packed.parms.xRsvd1 = 0;
+ packed.parms.xRemoteLp = remoteLp;
+ packed.parms.xType = type;
+ packed.parms.xRsvd2 = 0;
+ packed.parms.xLocalInstId = localInstanceId;
+ packed.parms.xRemoteInstId = remoteInstanceId;
+
+ retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle,
+   packed.dword,
+   localAddrOrTce,
+   remoteAddrOrTce,
+   transferLength);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//=================================================================
+// DMA between local memory and the service processor; the local
+// buffer is passed by its absolute (real) address.
+static inline HvLpDma_Rc HvCallEvent_dmaToSp(void* local, u32 remote, u32 length, HvLpDma_Direction dir)
+{
+ u64 localReal = virt_to_absolute( (unsigned long) local );
+
+ return (HvLpDma_Rc)HvCall4(HvCallEventDmaToSp, localReal, remote, length, dir);
+}
+//================================================================
+
+#endif // _HVCALLEVENT_H
+
--- /dev/null
+/*
+ * HvCallHpt.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//============================================================================
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from the OS.
+//
+//============================================================================
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include "HvCallSc.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+//-------------------------------------------------------------------
+// Other Includes
+//-------------------------------------------------------------------
+
+#ifndef _PPC_MMU_H
+#include <asm/mmu.h>
+#endif
+
+//-----------------------------------------------------------------------------
+// Constants
+//-----------------------------------------------------------------------------
+#ifndef _HVCALLHPT_H
+#define _HVCALLHPT_H
+
+#define HvCallHptGetHptAddress HvCallHpt + 0
+#define HvCallHptGetHptPages HvCallHpt + 1
+#define HvCallHptSetPp HvCallHpt + 5
+#define HvCallHptSetSwBits HvCallHpt + 6
+#define HvCallHptUpdate HvCallHpt + 7
+#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
+#define HvCallHptGet HvCallHpt + 11
+#define HvCallHptFindNextValid HvCallHpt + 12
+#define HvCallHptFindValid HvCallHpt + 13
+#define HvCallHptAddValidate HvCallHpt + 16
+#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
+
+
+//============================================================================
+// Ask the hypervisor for the address of the hashed page table (HPT).
+static inline u64 HvCallHpt_getHptAddress(void)
+{
+ u64 retval = HvCall0(HvCallHptGetHptAddress);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retval;
+}
+//============================================================================
+// Ask the hypervisor for the size of the HPT, in pages.
+static inline u64 HvCallHpt_getHptPages(void)
+{
+ u64 retval = HvCall0(HvCallHptGetHptPages);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retval;
+}
+//=============================================================================
+// Set the PP (page-protection) bits of the HPT entry at 'hpteIndex'.
+static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
+{
+ HvCall2( HvCallHptSetPp, hpteIndex, value );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//=============================================================================
+// Turn software bits of HPT entry 'hpteIndex' on ('bitson') and off ('bitsoff').
+static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff )
+{
+ HvCall3( HvCallHptSetSwBits, hpteIndex, bitson, bitsoff );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//=============================================================================
+// Invalidate HPT entry 'hpteIndex' without an I-cache synchronization.
+static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
+
+{
+ HvCall1( HvCallHptInvalidateNoSyncICache, hpteIndex );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//=============================================================================
+// Combined call: invalidate entry 'hpteIndex', adjust its software bits and
+// return the entry's compressed status.  Note it also issues an additional
+// explicit no-sync-I-cache invalidate of the same index afterwards.
+static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson, u8 bitsoff )
+
+{
+ u64 compressedStatus;
+ compressedStatus = HvCall4( HvCallHptInvalidateSetSwBitsGet, hpteIndex, bitson, bitsoff, 1 );
+ HvCall1( HvCallHptInvalidateNoSyncICache, hpteIndex );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return compressedStatus;
+}
+//=============================================================================
+// Look up a valid HPT entry for virtual page number 'vpn'.  The entry data is
+// returned through 'hpte' (the Ret16 buffer); the u64 result is the entry's
+// index.
+static inline u64 HvCallHpt_findValid( struct _HPTE *hpte, u64 vpn )
+{
+ u64 retIndex = HvCall3Ret16( HvCallHptFindValid, hpte, vpn, 0, 0 );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retIndex;
+}
+//=============================================================================
+// Find the next valid HPT entry after 'hpteIndex' matching the given software
+// bit on/off masks; entry data returned through 'hpte', index as the result.
+static inline u64 HvCallHpt_findNextValid( struct _HPTE *hpte, u32 hpteIndex, u8 bitson, u8 bitsoff )
+{
+ u64 retIndex = HvCall3Ret16( HvCallHptFindNextValid, hpte, hpteIndex, bitson, bitsoff );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retIndex;
+}
+//=============================================================================
+// Read the HPT entry at 'hpteIndex' into 'hpte'.
+static inline void HvCallHpt_get( struct _HPTE *hpte, u32 hpteIndex )
+{
+ HvCall2Ret16( HvCallHptGet, hpte, hpteIndex, 0 );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//============================================================================
+// Install and validate a new HPT entry at 'hpteIndex'.  The two 64-bit
+// doublewords of *hpte are passed by value to the hypervisor.
+static inline void HvCallHpt_addValidate( u32 hpteIndex,
+ u32 hBit,
+ struct _HPTE *hpte )
+
+{
+ HvCall4( HvCallHptAddValidate, hpteIndex,
+ hBit, (*((u64 *)hpte)), (*(((u64 *)hpte)+1)) );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+
+
+//=============================================================================
+
+#endif // _HVCALLHPT_H
+
--- /dev/null
+/************************************************************************/
+/* Provides the Hypervisor PCI calls for iSeries Linux Partition. */
+/* Copyright (C) 2001 <Wayne G Holm> <IBM Corporation> */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created, Jan 9, 2001 */
+/************************************************************************/
+//============================================================================
+// Header File Id
+// Name______________: HvCallPci.H
+//
+// Description_______:
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from SLIC.
+//
+//============================================================================
+
+//-------------------------------------------------------------------
+// Forward declarations
+//-------------------------------------------------------------------
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include "HvCallSc.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+//-------------------------------------------------------------------
+// Other Includes
+//-------------------------------------------------------------------
+
+
+//-----------------------------------------------------------------------------
+// Constants
+//-----------------------------------------------------------------------------
+#ifndef _HVCALLPCI_H
+#define _HVCALLPCI_H
+
+// Direct Select Address: identifies a PCI target (bus / sub-bus / device /
+// BAR).  It is always passed to the hypervisor reinterpreted as a single u64
+// (see the *(u64*)&dsa casts below), hence the size check in the comment:
+// 2 + 1 + 1 + 1 + 3 = 8 bytes.
+struct HvCallPci_DsaAddr { // make sure this struct size is 64-bits total
+ u16 busNumber;
+ u8 subBusNumber;
+ u8 deviceId;
+ u8 barNumber;
+ u8 reserved[3];
+};
+// Convenience overlay of the DSA struct on its raw u64 form.
+union HvDsaMap {
+ u64 DsaAddr;
+ struct HvCallPci_DsaAddr Dsa;
+};
+
+// 16-byte return area for load-type calls: return code plus loaded value.
+struct HvCallPci_LoadReturn {
+ u64 rc;
+ u64 value;
+};
+
+enum HvCallPci_DeviceType {HvCallPci_NodeDevice = 1,
+ HvCallPci_SpDevice = 2,
+ HvCallPci_IopDevice = 3,
+ HvCallPci_BridgeDevice = 4,
+ HvCallPci_MultiFunctionDevice = 5,
+ HvCallPci_IoaDevice = 6
+};
+
+
+struct HvCallPci_DeviceInfo {
+ u32 deviceType; // See DeviceType enum for values
+};
+
+// Common header of the buffer filled by HvCallPci_getBusUnitInfo().
+struct HvCallPci_BusUnitInfo {
+ u32 sizeReturned; // length of data returned
+ u32 deviceType; // see DeviceType enum for values
+};
+
+struct HvCallPci_BridgeInfo {
+ struct HvCallPci_BusUnitInfo busUnitInfo; // Generic bus unit info
+ u8 subBusNumber; // Bus number of secondary bus
+ u8 maxAgents; // Max idsels on secondary bus
+};
+
+
+// Maximum BusUnitInfo buffer size. Provided for clients so they can allocate
+// a buffer big enough for any type of bus unit. Increase as needed.
+enum {HvCallPci_MaxBusUnitInfoSize = 128};
+
+// Layout of the parameter area filled by HvCallPci_getBarParms().
+struct HvCallPci_BarParms {
+ u64 vaddr;
+ u64 raddr;
+ u64 size;
+ u64 protectStart;
+ u64 protectEnd;
+ u64 relocationOffset;
+ u64 pciAddress;
+ u64 reserved[3];
+};
+
+// Selector for HvCallPciGetCardVpd: whole-bus VPD vs. bus adapter VPD.
+enum HvCallPci_VpdType {
+ HvCallPci_BusVpd = 1,
+ HvCallPci_BusAdapterVpd = 2
+};
+
+#define HvCallPciConfigLoad8 HvCallPci + 0
+#define HvCallPciConfigLoad16 HvCallPci + 1
+#define HvCallPciConfigLoad32 HvCallPci + 2
+#define HvCallPciConfigStore8 HvCallPci + 3
+#define HvCallPciConfigStore16 HvCallPci + 4
+#define HvCallPciConfigStore32 HvCallPci + 5
+#define HvCallPciEoi HvCallPci + 16
+#define HvCallPciGetBarParms HvCallPci + 18
+#define HvCallPciMaskFisr HvCallPci + 20
+#define HvCallPciUnmaskFisr HvCallPci + 21
+#define HvCallPciSetSlotReset HvCallPci + 25
+#define HvCallPciGetDeviceInfo HvCallPci + 27
+#define HvCallPciGetCardVpd HvCallPci + 28
+#define HvCallPciBarLoad8 HvCallPci + 40
+#define HvCallPciBarLoad16 HvCallPci + 41
+#define HvCallPciBarLoad32 HvCallPci + 42
+#define HvCallPciBarLoad64 HvCallPci + 43
+#define HvCallPciBarStore8 HvCallPci + 44
+#define HvCallPciBarStore16 HvCallPci + 45
+#define HvCallPciBarStore32 HvCallPci + 46
+#define HvCallPciBarStore64 HvCallPci + 47
+#define HvCallPciMaskInterrupts HvCallPci + 48
+#define HvCallPciUnmaskInterrupts HvCallPci + 49
+#define HvCallPciGetBusUnitInfo HvCallPci + 50
+
+//============================================================================
+// Read an 8-bit value from PCI configuration space at 'offset' for the device
+// named by (busNumber, subBusNumber, deviceId).  The byte is stored through
+// 'value'; the hypervisor return code is the function result.
+static inline u64 HvCallPci_configLoad8(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset,
+ u8 *value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ HvCall3Ret16(HvCallPciConfigLoad8, &retVal, *(u64 *)&dsa, offset, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *value = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// As configLoad8, but reads a 16-bit config-space value.
+static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset,
+ u16 *value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *value = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// As configLoad8, but reads a 32-bit config-space value.
+static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset,
+ u32 *value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *value = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// Write the 8-bit 'value' to PCI configuration space at 'offset' for the
+// addressed device; returns the hypervisor return code.
+static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset,
+ u8 value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ retVal = HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// As configStore8, but writes a 16-bit value.
+static inline u64 HvCallPci_configStore16(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset,
+ u16 value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ retVal = HvCall4(HvCallPciConfigStore16, *(u64 *)&dsa, offset, value, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// As configStore8, but writes a 32-bit value.
+static inline u64 HvCallPci_configStore32(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u32 offset,
+ u32 value)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumber;
+ dsa.subBusNumber = subBusNumber;
+ dsa.deviceId = deviceId;
+
+ retVal = HvCall4(HvCallPciConfigStore32, *(u64 *)&dsa, offset, value, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Read an 8-bit value at 'offsetParm' within BAR 'barNumberParm' of the
+// addressed device.  The byte is stored through 'valueParm'; the hypervisor
+// return code is the function result.
+static inline u64 HvCallPci_barLoad8(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u8* valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ HvCall3Ret16(HvCallPciBarLoad8, &retVal, *(u64 *)&dsa, offsetParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *valueParm = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// As barLoad8, but reads a 16-bit value.
+static inline u64 HvCallPci_barLoad16(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u16* valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ HvCall3Ret16(HvCallPciBarLoad16, &retVal, *(u64 *)&dsa, offsetParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *valueParm = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// As barLoad8, but reads a 32-bit value.
+static inline u64 HvCallPci_barLoad32(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u32* valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ HvCall3Ret16(HvCallPciBarLoad32, &retVal, *(u64 *)&dsa, offsetParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *valueParm = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// As barLoad8, but reads a 64-bit value.
+static inline u64 HvCallPci_barLoad64(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u64* valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ HvCall3Ret16(HvCallPciBarLoad64, &retVal, *(u64 *)&dsa, offsetParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ *valueParm = retVal.value;
+
+ return retVal.rc;
+}
+//============================================================================
+// Write the 8-bit 'valueParm' at 'offsetParm' within BAR 'barNumberParm' of
+// the addressed device; returns the hypervisor return code.
+static inline u64 HvCallPci_barStore8(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u8 valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ retVal = HvCall4(HvCallPciBarStore8, *(u64 *)&dsa, offsetParm, valueParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// As barStore8, but writes a 16-bit value.
+static inline u64 HvCallPci_barStore16(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u16 valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ retVal = HvCall4(HvCallPciBarStore16, *(u64 *)&dsa, offsetParm, valueParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// As barStore8, but writes a 32-bit value.
+static inline u64 HvCallPci_barStore32(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u32 valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ retVal = HvCall4(HvCallPciBarStore32, *(u64 *)&dsa, offsetParm, valueParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// As barStore8, but writes a 64-bit value.
+static inline u64 HvCallPci_barStore64(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 offsetParm,
+ u64 valueParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ retVal = HvCall4(HvCallPciBarStore64, *(u64 *)&dsa, offsetParm, valueParm, 0);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Signal end-of-interrupt for the addressed device; returns the hypervisor rc.
+static inline u64 HvCallPci_eoi(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm)
+{
+ struct HvCallPci_DsaAddr dsa;
+ struct HvCallPci_LoadReturn retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal.rc;
+}
+//============================================================================
+// Retrieve BAR parameters for BAR 'barNumberParm' of the addressed device into
+// the buffer described by 'parms'/'sizeofParms'.
+// NOTE(review): 'parms' is presumably the real address of a
+// HvCallPci_BarParms area — confirm against callers.
+static inline u64 HvCallPci_getBarParms(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u8 barNumberParm,
+ u64 parms,
+ u32 sizeofParms)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+ dsa.barNumber = barNumberParm;
+
+ retVal = HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Mask FISR (function interrupt status) bits given by 'fisrMask' for the
+// addressed device.
+static inline u64 HvCallPci_maskFisr(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u64 fisrMask)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ retVal = HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Unmask FISR bits given by 'fisrMask' for the addressed device.
+static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u64 fisrMask)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ retVal = HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Assert (onNotOff != 0) or deassert slot reset for the addressed device.
+static inline u64 HvCallPci_setSlotReset(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u64 onNotOff)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ retVal = HvCall2(HvCallPciSetSlotReset, *(u64*)&dsa, onNotOff);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Fetch HvCallPci_DeviceInfo for a device into 'parms'/'sizeofParms'.  Note
+// the device number is shifted into the upper 4 bits of the deviceId field.
+static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceNumberParm,
+ u64 parms,
+ u32 sizeofParms)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceNumberParm << 4;
+
+ retVal = HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Mask the interrupts selected by 'interruptMask' for the addressed device.
+static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u64 interruptMask)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ retVal = HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+// Unmask the interrupts selected by 'interruptMask' for the addressed device.
+static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u64 interruptMask)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ retVal = HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+
+// Fetch bus unit info (HvCallPci_BusUnitInfo header plus type-specific data)
+// for the addressed device into the buffer 'parms'/'sizeofParms'.
+static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm,
+ u8 subBusParm,
+ u8 deviceIdParm,
+ u64 parms,
+ u32 sizeofParms)
+{
+ struct HvCallPci_DsaAddr dsa;
+ u64 retVal;
+
+ *((u64*)&dsa) = 0;
+
+ dsa.busNumber = busNumberParm;
+ dsa.subBusNumber = subBusParm;
+ dsa.deviceId = deviceIdParm;
+
+ retVal = HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms, sizeofParms);
+
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+
+ return retVal;
+}
+//============================================================================
+
+// Copy up to 'sizeParm' bytes of bus VPD (vital product data) for bus
+// 'busNumParm' to 'destParm'.  Returns the number of bytes written (low 16
+// bits of the hypervisor result) or -1 on failure.
+static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm, u16 sizeParm)
+{
+ int xRetSize;
+ u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm, sizeParm, HvCallPci_BusVpd);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ if (xRc == -1)
+ xRetSize = -1;
+ else
+ xRetSize = xRc & 0xFFFF;
+ return xRetSize;
+}
+//============================================================================
+
+// As getBusVpd, but requests the bus adapter's VPD instead of the bus's.
+static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, u16 sizeParm)
+{
+ int xRetSize;
+ u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm, sizeParm, HvCallPci_BusAdapterVpd);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ if (xRc == -1)
+ xRetSize = -1;
+ else
+ xRetSize = xRc & 0xFFFF;
+ return xRetSize;
+}
+//============================================================================
+#endif // _HVCALLPCI_H
--- /dev/null
+/*
+ * HvCallSc.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+#ifndef _HVCALLSC_H
+#define _HVCALLSC_H
+
+// Hypervisor call "function set" base tokens.  Individual calls are formed by
+// adding a small function index to one of these (e.g. "HvCallHpt + 7" in the
+// HvCallHpt.h header).
+#define HvCallBase 0x8000000000000000
+#define HvCallCfg 0x8002000000000000
+#define HvCallEvent 0x8003000000000000
+#define HvCallHpt 0x8004000000000000
+#define HvCallPci 0x8005000000000000
+#define HvCallSm 0x8007000000000000
+#define HvCallXm 0x8009000000000000
+
+// HvCallN: issue the hypervisor call identified by the first (token) argument
+// with N additional 64-bit arguments; returns a single u64.
+u64 HvCall0( u64 );
+u64 HvCall1( u64, u64 );
+u64 HvCall2( u64, u64, u64 );
+u64 HvCall3( u64, u64, u64, u64 );
+u64 HvCall4( u64, u64, u64, u64, u64 );
+u64 HvCall5( u64, u64, u64, u64, u64, u64 );
+u64 HvCall6( u64, u64, u64, u64, u64, u64, u64 );
+u64 HvCall7( u64, u64, u64, u64, u64, u64, u64, u64 );
+
+// HvCallNRet16: as HvCallN, but the hypervisor additionally returns data
+// (presumably 16 bytes, per the name) through the buffer pointed to by the
+// second parameter — see e.g. HvCallPci_LoadReturn.
+u64 HvCall0Ret16( u64, void * );
+u64 HvCall1Ret16( u64, void *, u64 );
+u64 HvCall2Ret16( u64, void *, u64, u64 );
+u64 HvCall3Ret16( u64, void *, u64, u64, u64 );
+u64 HvCall4Ret16( u64, void *, u64, u64, u64, u64 );
+u64 HvCall5Ret16( u64, void *, u64, u64, u64, u64, u64 );
+u64 HvCall6Ret16( u64, void *, u64, u64, u64, u64, u64, u64 );
+u64 HvCall7Ret16( u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64 );
+
+#endif /* _HVCALLSC_H */
--- /dev/null
+/*
+ * HvCallSm.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//============================================================================
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from the OS.
+//
+//============================================================================
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include "HvCallSc.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+//-----------------------------------------------------------------------------
+// Constants
+//-----------------------------------------------------------------------------
+#ifndef _HVCALLSM_H
+#define _HVCALLSM_H
+
+#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
+
+
+//============================================================================
+// Return 64 bits of the access map for partition 'lpIndex', selected by
+// 'indexIntoBitMap'.
+static inline u64 HvCallSm_get64BitsOfAccessMap(
+ HvLpIndex lpIndex, u64 indexIntoBitMap )
+{
+ u64 retval = HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex,
+ indexIntoBitMap );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retval;
+}
+//============================================================================
+#endif // _HVCALLSM_H
+
--- /dev/null
+//============================================================================
+// Header File Id
+// Name______________: HvCallXm.H
+//
+// Description_______:
+//
+// This file contains the "hypervisor call" interface which is used to
+// drive the hypervisor from SLIC.
+//
+//============================================================================
+
+//-------------------------------------------------------------------
+// Forward declarations
+//-------------------------------------------------------------------
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _HVCALLSC_H
+#include "HvCallSc.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+//-------------------------------------------------------------------
+// Other Includes
+//-------------------------------------------------------------------
+
+
+//-----------------------------------------------------------------------------
+// Constants
+//-----------------------------------------------------------------------------
+#ifndef _HVCALLXM_H
+#define _HVCALLXM_H
+
+#define HvCallXmGetTceTableParms HvCallXm + 0
+#define HvCallXmTestBus HvCallXm + 1
+#define HvCallXmConnectBusUnit HvCallXm + 2
+#define HvCallXmLoadTod HvCallXm + 8
+#define HvCallXmTestBusUnit HvCallXm + 9
+#define HvCallXmSetTce HvCallXm + 11
+#define HvCallXmSetTces HvCallXm + 13
+
+
+
+//============================================================================
+// Fetch TCE (DMA translation) table parameters; 'cb' is the control-block
+// argument passed through to the hypervisor (presumably its real address —
+// confirm against callers).
+static inline void HvCallXm_getTceTableParms(u64 cb)
+{
+ HvCall1(HvCallXmGetTceTableParms, cb);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+}
+//============================================================================
+// Set a single TCE at 'tceOffset' in the table identified by 'tceTableToken'.
+static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
+{
+ u64 retval = HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retval;
+}
+//============================================================================
+// Set up to four TCEs (tce1..tce4) starting at 'tceOffset'; 'numTces' says how
+// many are valid.
+static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset, u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
+{
+ u64 retval = HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
+ tce1, tce2, tce3, tce4 );
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retval;
+}
+//=============================================================================
+// Test bus 'busNumber'; returns the hypervisor return code.
+static inline u64 HvCallXm_testBus(u16 busNumber)
+{
+ u64 retVal = HvCall1(HvCallXmTestBus, busNumber);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//=====================================================================================
+// Test one bus unit; the unit number packs subBusNumber (high byte) and
+// deviceId (low byte).
+static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber, u8 deviceId)
+{
+ u64 busUnitNumber = (subBusNumber << 8) | deviceId;
+ u64 retVal = HvCall2(HvCallXmTestBusUnit, busNumber, busUnitNumber);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//=====================================================================================
+// Connect a bus unit to the interrupt identified by 'interruptToken'.  The
+// queue index is currently hard-wired to 0 (see the retained OS/400-style
+// mapDsaToQueueIndex comment below).
+static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber, u8 deviceId,
+ u64 interruptToken)
+{
+ u64 busUnitNumber = (subBusNumber << 8) | deviceId;
+ u64 queueIndex = 0; // HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard));
+
+ u64 retVal = HvCall5(HvCallXmConnectBusUnit, busNumber, busUnitNumber,
+ interruptToken, 0, queueIndex);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//=====================================================================================
+// Load the time-of-day value from the hypervisor.
+static inline u64 HvCallXm_loadTod(void)
+{
+ u64 retVal = HvCall0(HvCallXmLoadTod);
+ // getPaca()->adjustHmtForNoOfSpinLocksHeld();
+ return retVal;
+}
+//=====================================================================================
+
+#endif // _HVCALLXM_H
+
--- /dev/null
+/*
+ * HvLpConfig.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//===========================================================================
+//
+// This file contains the interface to the LPAR configuration data
+// to determine which resources should be allocated to each partition.
+//
+//===========================================================================
+
+#ifndef _HVCALLCFG_H
+#include "HvCallCfg.h"
+#endif
+
+#ifndef _HVTYPES_H
+#include <asm/iSeries/HvTypes.h>
+#endif
+
+#ifndef _ITLPNACA_H
+#include <asm/iSeries/ItLpNaca.h>
+#endif
+
+#ifndef _LPARDATA_H
+#include <asm/iSeries/LparData.h>
+#endif
+
+#ifndef _HVLPCONFIG_H
+#define _HVLPCONFIG_H
+
+//-------------------------------------------------------------------
+// Constants
+//-------------------------------------------------------------------
+
+// Out-of-line variant of HvLpConfig_getLpIndex() for callers that cannot
+// see itLpNaca at the point of call (defined in a .c file elsewhere).
+extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
+
+//===================================================================
+// LP index of this partition, read from the shared LP NACA.
+static inline HvLpIndex HvLpConfig_getLpIndex(void)
+{
+ return itLpNaca.xLpIndex;
+}
+//===================================================================
+// LP index of the primary partition.
+static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
+{
+ return itLpNaca.xPrimaryLpIndex;
+}
+//=================================================================
+// Number of logical partitions, as reported by the hypervisor.
+static inline HvLpIndex HvLpConfig_getLps(void)
+{
+ return HvCallCfg_getLps();
+}
+//=================================================================
+// Bitmap of currently active partitions (one bit per LP index).
+static inline HvLpIndexMap HvLpConfig_getActiveLpMap(void)
+{
+ return HvCallCfg_getActiveLpMap();
+}
+//=================================================================
+// Total system main store.  The hypervisor reports it in 256KB
+// "chunks"; converted here to megabytes / chunks / 4KB pages using
+// HVCHUNKSPERMEG (4) and HVPAGESPERCHUNK (64) from HvTypes.h.
+static inline u64 HvLpConfig_getSystemMsMegs(void)
+{
+ return HvCallCfg_getSystemMsChunks() / HVCHUNKSPERMEG;
+}
+//=================================================================
+static inline u64 HvLpConfig_getSystemMsChunks(void)
+{
+ return HvCallCfg_getSystemMsChunks();
+}
+//=================================================================
+static inline u64 HvLpConfig_getSystemMsPages(void)
+{
+ return HvCallCfg_getSystemMsChunks() * HVPAGESPERCHUNK;
+}
+//================================================================
+// Main store allocated to THIS partition, in megs/chunks/pages.
+// The second argument selects which configured value is queried:
+// HvCallCfg_Cur (current), _Min, _Max or _Init; the MinRuntime
+// variants query the minimum needed while the partition is running.
+static inline u64 HvLpConfig_getMsMegs(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Cur) / HVCHUNKSPERMEG;
+}
+//================================================================
+static inline u64 HvLpConfig_getMsChunks(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Cur);
+}
+//================================================================
+static inline u64 HvLpConfig_getMsPages(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Cur) * HVPAGESPERCHUNK;
+}
+//================================================================
+static inline u64 HvLpConfig_getMinMsMegs(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Min) / HVCHUNKSPERMEG;
+}
+//================================================================
+static inline u64 HvLpConfig_getMinMsChunks(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Min);
+}
+//================================================================
+static inline u64 HvLpConfig_getMinMsPages(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Min) * HVPAGESPERCHUNK;
+}
+//================================================================
+static inline u64 HvLpConfig_getMinRuntimeMsMegs(void)
+{
+ return HvCallCfg_getMinRuntimeMsChunks(HvLpConfig_getLpIndex()) / HVCHUNKSPERMEG;
+}
+//===============================================================
+static inline u64 HvLpConfig_getMinRuntimeMsChunks(void)
+{
+ return HvCallCfg_getMinRuntimeMsChunks(HvLpConfig_getLpIndex());
+}
+//===============================================================
+static inline u64 HvLpConfig_getMinRuntimeMsPages(void)
+{
+ return HvCallCfg_getMinRuntimeMsChunks(HvLpConfig_getLpIndex()) * HVPAGESPERCHUNK;
+}
+//===============================================================
+static inline u64 HvLpConfig_getMaxMsMegs(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Max) / HVCHUNKSPERMEG;
+}
+//===============================================================
+static inline u64 HvLpConfig_getMaxMsChunks(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Max);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMaxMsPages(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Max) * HVPAGESPERCHUNK;
+}
+//===============================================================
+static inline u64 HvLpConfig_getInitMsMegs(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Init) / HVCHUNKSPERMEG;
+}
+//===============================================================
+static inline u64 HvLpConfig_getInitMsChunks(void)
+{
+ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Init);
+}
+//===============================================================
+static inline u64 HvLpConfig_getInitMsPages(void)
+{ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Init) * HVPAGESPERCHUNK;
+}
+//===============================================================
+// Processor configuration.  "Logical" counts are derived as
+// physical * (secondary threads + 1); the secondary-thread term is
+// currently disabled (commented out), so logical == physical here.
+// Shared-processor units are queried per-LP with Cur/Min/Max selectors.
+static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
+{
+ return HvCallCfg_getSystemPhysicalProcessors();
+}
+//===============================================================
+static inline u64 HvLpConfig_getSystemLogicalProcessors(void)
+{
+ return HvCallCfg_getSystemPhysicalProcessors() * (/*getPaca()->getSecondaryThreadCount() +*/ 1);
+}
+//===============================================================
+static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
+{
+ return HvCallCfg_getNumProcsInSharedPool(sPI);
+}
+//===============================================================
+static inline u64 HvLpConfig_getPhysicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Cur);
+}
+//===============================================================
+static inline u64 HvLpConfig_getLogicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Cur) * (/*getPaca()->getSecondaryThreadCount() +*/ 1);
+}
+//===============================================================
+static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
+{
+ return HvCallCfg_getSharedPoolIndex(HvLpConfig_getLpIndex());
+}
+//===============================================================
+static inline u64 HvLpConfig_getSharedProcUnits(void)
+{
+ return HvCallCfg_getSharedProcUnits(HvLpConfig_getLpIndex(),HvCallCfg_Cur);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMinSharedProcUnits(void)
+{
+ return HvCallCfg_getSharedProcUnits(HvLpConfig_getLpIndex(),HvCallCfg_Min);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
+{
+ return HvCallCfg_getSharedProcUnits(HvLpConfig_getLpIndex(),HvCallCfg_Max);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMinPhysicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Min);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMinLogicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Min) * (/*getPaca()->getSecondaryThreadCount() +*/ 1);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Max);
+}
+//===============================================================
+static inline u64 HvLpConfig_getMaxLogicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Max) * (/*getPaca()->getSecondaryThreadCount() +*/ 1);
+}
+//===============================================================
+static inline u64 HvLpConfig_getInitPhysicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Init);
+}
+//===============================================================
+static inline u64 HvLpConfig_getInitLogicalProcessors(void)
+{
+ return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Init) * (/*getPaca()->getSecondaryThreadCount() +*/ 1);
+}
+//================================================================
+// Virtual LAN membership bitmap for this partition.  Note this one
+// deliberately uses the out-of-line getLpIndex variant.
+static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
+{
+ return HvCallCfg_getVirtualLanIndexMap(HvLpConfig_getLpIndex_outline());
+}
+//===============================================================
+// Virtual LAN membership bitmap for an arbitrary partition.
+static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(HvLpIndex lp)
+{
+ return HvCallCfg_getVirtualLanIndexMap(lp);
+}
+//================================================================
+// LP index of the partition that owns the given bus.
+static inline HvLpIndex HvLpConfig_getBusOwner(HvBusNumber busNumber)
+{
+ return HvCallCfg_getBusOwner(busNumber);
+}
+//===============================================================
+// Nonzero if the bus is dedicated (not shared between partitions).
+static inline int HvLpConfig_isBusDedicated(HvBusNumber busNumber)
+{
+ return HvCallCfg_isBusDedicated(busNumber);
+}
+//================================================================
+// Bitmap of partitions with an allocation on the given bus.
+static inline HvLpIndexMap HvLpConfig_getBusAllocation(HvBusNumber busNumber)
+{
+ return HvCallCfg_getBusAllocation(busNumber);
+}
+//================================================================
+// returns the absolute real address of the load area
+// (the mask clears the high-order bit of xLoadAreaAddr)
+static inline u64 HvLpConfig_getLoadAddress(void)
+{
+ return itLpNaca.xLoadAreaAddr & 0x7fffffffffffffff;
+}
+//================================================================
+// Size of the load area in 4KB pages (stored in 256KB chunks).
+static inline u64 HvLpConfig_getLoadPages(void)
+{
+ return itLpNaca.xLoadAreaChunks * HVPAGESPERCHUNK;
+}
+//================================================================
+// Nonzero if the given bus is owned by the running partition.
+static inline int HvLpConfig_isBusOwnedByThisLp(HvBusNumber busNumber)
+{
+ HvLpIndex busOwner = HvLpConfig_getBusOwner(busNumber);
+ return (busOwner == HvLpConfig_getLpIndex());
+}
+//================================================================
+// Nonzero if the two partitions share at least one virtual LAN.
+static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1, HvLpIndex lp2)
+{
+ HvLpVirtualLanIndexMap virtualLanIndexMap1 = HvCallCfg_getVirtualLanIndexMap( lp1 );
+ HvLpVirtualLanIndexMap virtualLanIndexMap2 = HvCallCfg_getVirtualLanIndexMap( lp2 );
+ return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
+}
+//================================================================
+// LP index of the partition hosting the given partition's I/O.
+static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
+{
+ return HvCallCfg_getHostingLpIndex(lp);
+}
+//================================================================
+
+#endif // _HVLPCONFIG_H
--- /dev/null
+/*
+ * HvLpEvent.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//======================================================================
+//
+// This file contains the struct and interfaces for HV events in the system.
+//
+//=====================================================================
+#ifndef _HVLPEVENT_H
+#define _HVLPEVENT_H
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/iSeries/HvTypes.h>
+#ifndef _HVCALLEVENT_H
+#include <asm/iSeries/HvCallEvent.h>
+#endif
+
+
+//=====================================================================
+//
+// HvLpEvent is the structure for Lp Event messages passed between
+// partitions through PLIC.
+//
+//=====================================================================
+
+// One-byte flag field at offset x00 of every HvLpEvent; bit meanings
+// correspond to the HvLpEvent_Function/AckInd/AckType #defines below.
+struct HvEventFlags
+{
+ u8 xValid:1; // Indicates a valid request x00-x00
+ u8 xRsvd1:4; // Reserved ...
+ u8 xAckType:1; // Immediate or deferred ...
+ u8 xAckInd:1; // Indicates if ACK required ...
+ u8 xFunction:1; // Interrupt or Acknowledge ...
+};
+
+
+// Fixed 0x18-byte event header passed between partitions through PLIC;
+// subtype-specific payload follows (size recorded in xSizeMinus1).
+// Offsets in the comments are part of the cross-partition contract.
+struct HvLpEvent
+{
+ struct HvEventFlags xFlags; // Event flags x00-x00
+ u8 xType; // Type of message x01-x01
+ u16 xSubtype; // Subtype for event x02-x03
+ u8 xSourceLp; // Source LP x04-x04
+ u8 xTargetLp; // Target LP x05-x05
+ u8 xSizeMinus1; // Size of Derived class - 1 x06-x06
+ u8 xRc; // RC for Ack flows x07-x07
+ u16 xSourceInstanceId; // Source sides instance id x08-x09
+ u16 xTargetInstanceId; // Target sides instance id x0A-x0B
+ union {
+ u32 xSubtypeData; // Data usable by the subtype x0C-x0F
+ u16 xSubtypeDataShort[2]; // Data as 2 shorts
+ u8 xSubtypeDataChar[4]; // Data as 4 chars
+ } x;
+
+ u64 xCorrelationToken; // Unique value for source/type x10-x17
+};
+
+// Lp Event handler function
+typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *);
+
+// Register a handler for an event type
+// returns 0 on success
+extern int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler hdlr);
+
+// Unregister a handler for an event type
+// returns 0 on success
+// Unregister will fail if there are any paths open for the type
+extern int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType );
+
+// Open an Lp Event Path for an event type
+// returns 0 on success
+// openPath will fail if there is no handler registered for the event type.
+// The lpIndex specified is the partition index for the target partition
+// (for VirtualIo, VirtualLan and SessionMgr; other types specify zero)
+extern int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex );
+
+
+// Close an Lp Event Path for a type and partition
+// returns 0 on success
+extern int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex );
+
+// Event types (values for HvLpEvent.xType)
+#define HvLpEvent_Type_Hypervisor 0
+#define HvLpEvent_Type_MachineFac 1
+#define HvLpEvent_Type_SessionMgr 2
+#define HvLpEvent_Type_SpdIo 3
+#define HvLpEvent_Type_VirtualBus 4
+#define HvLpEvent_Type_PciIo 5
+#define HvLpEvent_Type_RioIo 6
+#define HvLpEvent_Type_VirtualLan 7
+#define HvLpEvent_Type_VirtualIo 8
+#define HvLpEvent_Type_NumTypes 9
+
+// Return codes (values for HvLpEvent.xRc on Ack flows)
+#define HvLpEvent_Rc_Good 0
+#define HvLpEvent_Rc_BufferNotAvailable 1
+#define HvLpEvent_Rc_Cancelled 2
+#define HvLpEvent_Rc_GenericError 3
+#define HvLpEvent_Rc_InvalidAddress 4
+#define HvLpEvent_Rc_InvalidPartition 5
+#define HvLpEvent_Rc_InvalidSize 6
+#define HvLpEvent_Rc_InvalidSubtype 7
+#define HvLpEvent_Rc_InvalidSubtypeData 8
+#define HvLpEvent_Rc_InvalidType 9
+#define HvLpEvent_Rc_PartitionDead 10
+#define HvLpEvent_Rc_PathClosed 11
+#define HvLpEvent_Rc_SubtypeError 12
+
+// HvEventFlags.xFunction values
+#define HvLpEvent_Function_Ack 0
+#define HvLpEvent_Function_Int 1
+
+// HvEventFlags.xAckInd values
+#define HvLpEvent_AckInd_NoAck 0
+#define HvLpEvent_AckInd_DoAck 1
+
+// HvEventFlags.xAckType values
+#define HvLpEvent_AckType_ImmediateAck 0
+#define HvLpEvent_AckType_DeferredAck 1
+
+// LP DMA direction, address type and return codes
+#define HvLpDma_Direction_LocalToRemote 0
+#define HvLpDma_Direction_RemoteToLocal 1
+
+#define HvLpDma_AddressType_TceIndex 0
+#define HvLpDma_AddressType_RealAddress 1
+
+#define HvLpDma_Rc_Good 0
+#define HvLpDma_Rc_Error 1
+#define HvLpDma_Rc_PartitionDead 2
+#define HvLpDma_Rc_PathClosed 3
+#define HvLpDma_Rc_InvalidAddress 4
+#define HvLpDma_Rc_InvalidLength 5
+
+#endif // _HVLPEVENT_H
--- /dev/null
+/*
+ * HvReleaseData.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=============================================================================
+//
+// This control block contains the critical information about the
+// release so that it can be changed in the future (ie, the virtual
+// address of the OS's NACA).
+//
+//-----------------------------------------------------------------------------
+// Standard Includes
+//-----------------------------------------------------------------------------
+#ifndef _PPC64_TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _HVRELEASEDATA_H
+#define _HVRELEASEDATA_H
+
+//=============================================================================
+//
+// When we IPL a secondary partition, we will check if the
+// secondary xMinPlicVrmIndex > the primary xVrmIndex.
+// If it is then this tells PLIC that this secondary is not
+// supported running on this "old" of a level of PLIC.
+//
+// Likewise, we will compare the primary xMinSlicVrmIndex to
+// the secondary xVrmIndex.
+// If the primary xMinSlicVrmIndex > secondary xVrmIndex then we
+// know that this PLIC does not support running an OS "that old".
+//
+//=============================================================================
+
+// Release/compatibility control block shared with the hypervisor (PLIC).
+// Layout is fixed at 0x40 bytes (see the offset comments); do not reorder
+// or resize fields.
+// NOTE(review): "Compatable" is a typo baked into the field name; renaming
+// it would break the shared-layout contract, so it is left as-is.
+struct HvReleaseData
+{
+ u32 xDesc; // Descriptor "HvRD" ebcdic x00-x03
+ u16 xSize; // Size of this control block x04-x05
+ u16 xVpdAreasPtrOffset; // Offset in NACA of ItVpdAreas x06-x07
+ struct Naca * xSlicNacaAddr; // Virtual address of SLIC NACA x08-x0F
+ u32 xMsNucDataOffset; // Offset of Linux Mapping Data x10-x13
+ u32 xRsvd1; // Reserved x14-x17
+ u16 xTagsMode:1; // 0 == tags active, 1 == tags inactive
+ u16 xAddressSize:1; // 0 == 64-bit, 1 == 32-bit
+ u16 xNoSharedProcs:1; // 0 == shared procs, 1 == no shared
+ u16 xNoHMT:1; // 0 == allow HMT, 1 == no HMT
+ u16 xRsvd2:12; // Reserved x18-x19
+ u16 xVrmIndex; // VRM Index of OS image x1A-x1B
+ u16 xMinSupportedPlicVrmIndex;// Min PLIC level (soft) x1C-x1D
+ u16 xMinCompatablePlicVrmIndex;// Min PLIC levelP (hard) x1E-x1F
+ char xVrmName[12]; // Displayable name x20-x2B
+ char xRsvd3[20]; // Reserved x2C-x3F
+};
+
+#endif // _HVRELEASEDATA_H
--- /dev/null
+/*
+ * HvTypes.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//===========================================================================
+// Header File Id
+// Name______________: HvTypes.H
+//
+// Description_______:
+//
+// General typedefs for the hypervisor.
+//
+// Declared Class(es):
+//
+//===========================================================================
+
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+
+
+#ifndef _HVTYPES_H
+#define _HVTYPES_H
+
+//-------------------------------------------------------------------
+// Typedefs
+//-------------------------------------------------------------------
+// Scalar types used throughout the hypervisor call interface.  These
+// widths are part of the OS/PLIC contract; do not change them.
+typedef u8 HvLpIndex;
+typedef u16 HvLpInstanceId;
+typedef u64 HvLpTOD;
+typedef u64 HvLpSystemSerialNum;
+typedef u8 HvLpDeviceSerialNum[12];
+typedef u16 HvLpSanHwSet;
+typedef u16 HvLpBus;
+typedef u16 HvLpBoard;
+typedef u16 HvLpCard;
+typedef u8 HvLpDeviceType[4];
+typedef u8 HvLpDeviceModel[3];
+typedef u64 HvIoToken;
+typedef u8 HvLpName[8];
+typedef u32 HvIoId;
+typedef u64 HvRealMemoryIndex;
+typedef u32 HvLpIndexMap; // Must hold HvMaxArchitectedLps bits!!!
+typedef u16 HvLpVrmIndex;
+typedef u32 HvXmGenerationId;
+typedef u8 HvLpBusPool;
+typedef u8 HvLpSharedPoolIndex;
+typedef u16 HvLpSharedProcUnitsX100; // Shared processor units scaled by 100
+typedef u8 HvLpVirtualLanIndex;
+typedef u16 HvLpVirtualLanIndexMap; // Must hold HvMaxArchitectedVirtualLans bits!!!
+typedef u16 HvBusNumber; // Hypervisor Bus Number
+typedef u8 HvSubBusNumber; // Hypervisor SubBus Number
+typedef u8 HvAgentId; // Hypervisor DevFn
+
+
+// Memory geometry: a chunk is 256KB and a page 4KB, so there are
+// 4 chunks and 256 pages per megabyte, 64 pages per chunk.
+// The arithmetic macros are parenthesized so that uses such as
+// "x % HVCHUNKSIZE" or "x / HVPAGESIZE" group correctly.
+#define HVMAXARCHITECTEDLPS 32
+#define HVCHUNKSIZE (256 * 1024)
+#define HVPAGESIZE (4 * 1024)
+#define HVLPMINMEGSPRIMARY 256
+#define HVLPMINMEGSSECONDARY 64
+#define HVCHUNKSPERMEG 4
+#define HVPAGESPERMEG 256
+#define HVPAGESPERCHUNK 64
+
+#define HvMaxArchitectedLps ((HvLpIndex)HVMAXARCHITECTEDLPS)
+#define HvMaxArchitectedVirtualLans ((HvLpVirtualLanIndex)16)
+#define HvLpIndexInvalid ((HvLpIndex)0xff)
+
+//--------------------------------------------------------------------
+// Enums for the sub-components under PLIC
+// Used in HvCall and HvPrimaryCall
+//--------------------------------------------------------------------
+// Component ids for the sub-components under PLIC.  The HvCall* and
+// HvPrimaryCall* id spaces are distinct call interfaces that deliberately
+// reuse the same small integers, so duplicate values here are intentional.
+enum HvCallCompIds
+{
+ HvCallCompId = 0,
+ HvCallCpuCtlsCompId = 1,
+ HvCallCfgCompId = 2,
+ HvCallEventCompId = 3,
+ HvCallHptCompId = 4,
+ HvCallPciCompId = 5,
+ HvCallSlmCompId = 6,
+ HvCallSmCompId = 7,
+ HvCallSpdCompId = 8,
+ HvCallXmCompId = 9,
+ HvCallRioCompId = 10,
+ HvCallRsvd3CompId = 11,
+ HvCallRsvd2CompId = 12,
+ HvCallRsvd1CompId = 13,
+ HvCallMaxCompId = 14,
+ HvPrimaryCallCompId = 0,
+ HvPrimaryCallCfgCompId = 1,
+ HvPrimaryCallPciCompId = 2,
+ HvPrimaryCallSmCompId = 3,
+ HvPrimaryCallSpdCompId = 4,
+ HvPrimaryCallXmCompId = 5,
+ HvPrimaryCallRioCompId = 6,
+ HvPrimaryCallRsvd7CompId = 7,
+ HvPrimaryCallRsvd6CompId = 8,
+ HvPrimaryCallRsvd5CompId = 9,
+ HvPrimaryCallRsvd4CompId = 10,
+ HvPrimaryCallRsvd3CompId = 11,
+ HvPrimaryCallRsvd2CompId = 12,
+ HvPrimaryCallRsvd1CompId = 13,
+ HvPrimaryCallMaxCompId = HvCallMaxCompId
+};
+
+// One (address, length) element of a buffer list passed to the
+// hypervisor; presumably used for scatter/gather style transfers —
+// confirm against the HvCall users of this type.
+struct HvLpBufferList {
+ u64 addr;
+ u64 len;
+};
+
+#endif // _HVTYPES_H
--- /dev/null
+/*
+ * IoHriMainStore.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _IOHRIMAINSTORE_H
+#define _IOHRIMAINSTORE_H
+
+/* Main Store Vpd for Condor,iStar,sStar */
+// Main store VPD layout for Condor/iStar/sStar hardware: existence and
+// functional bits for up to four memory areas, plus pointers to the
+// per-area data.  Layout is hardware-defined; do not reorder fields.
+struct IoHriMainStoreSegment4 {
+ u8 msArea0Exists:1;
+ u8 msArea1Exists:1;
+ u8 msArea2Exists:1;
+ u8 msArea3Exists:1;
+ u8 reserved1:4;
+ u8 reserved2;
+
+ u8 msArea0Functional:1;
+ u8 msArea1Functional:1;
+ u8 msArea2Functional:1;
+ u8 msArea3Functional:1;
+ u8 reserved3:4;
+ u8 reserved4;
+
+ u32 totalMainStore;
+
+ u64 msArea0Ptr;
+ u64 msArea1Ptr;
+ u64 msArea2Ptr;
+ u64 msArea3Ptr;
+
+ u32 cardProductionLevel;
+
+ u32 msAdrHole;
+
+ u8 msArea0HasRiserVpd:1;
+ u8 msArea1HasRiserVpd:1;
+ u8 msArea2HasRiserVpd:1;
+ u8 msArea3HasRiserVpd:1;
+ u8 reserved5:4;
+ u8 reserved6;
+ u16 reserved7;
+
+ u8 reserved8[28];
+
+ u64 nonInterleavedBlocksStartAdr;
+ u64 nonInterleavedBlocksEndAdr;
+};
+
+/* Main Store VPD for Power4 */
+// Per-chip manufacturing id and EC level (Power4 main store VPD).
+// NOTE(review): __attribute (no trailing underscores) is a GCC-accepted
+// alias for __attribute__ — nonstandard spelling, kept as in the original.
+struct IoHriMainStoreChipInfo1 {
+ u32 chipMfgID __attribute((packed));
+ char chipECLevel[4] __attribute((packed));
+};
+
+// Type/model/part/serial identification strings (not NUL-terminated).
+struct IoHriMainStoreVpdIdData {
+ char typeNumber[4];
+ char modelNumber[4];
+ char partNumber[12];
+ char serialNumber[12];
+};
+
+// FRU (field-replaceable unit) description for a main store card.
+struct IoHriMainStoreVpdFruData {
+ char fruLabel[8] __attribute((packed));
+ u8 numberOfSlots __attribute((packed));
+ u8 pluggingType __attribute((packed));
+ u16 slotMapIndex __attribute((packed));
+};
+
+// One contiguous address range belonging to a processor chip.
+struct IoHriMainStoreAdrRangeBlock {
+ void * blockStart __attribute((packed));
+ void * blockEnd __attribute((packed));
+ u32 blockProcChipId __attribute((packed));
+};
+
+#define MaxAreaAdrRangeBlocks 4
+
+// One main store area in the Power4 VPD format: existence/functional
+// masks, address ranges, per-chip info, NUMA DIMM arrays and FRU data.
+// Every member is individually packed; the layout is DMAd from CSP and
+// must not be changed.
+struct IoHriMainStoreArea4 {
+ u32 msVpdFormat __attribute((packed));
+ u8 containedVpdType __attribute((packed));
+ u8 reserved1 __attribute((packed));
+ u16 reserved2 __attribute((packed));
+
+ u64 msExists __attribute((packed));
+ u64 msFunctional __attribute((packed));
+
+ u32 memorySize __attribute((packed));
+ u32 procNodeId __attribute((packed));
+
+ u32 numAdrRangeBlocks __attribute((packed));
+ struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks] __attribute((packed));
+
+ struct IoHriMainStoreChipInfo1 chipInfo0 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo1 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo2 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo3 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo4 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo5 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo6 __attribute((packed));
+ struct IoHriMainStoreChipInfo1 chipInfo7 __attribute((packed));
+
+ void * msRamAreaArray __attribute((packed));
+ u32 msRamAreaArrayNumEntries __attribute((packed));
+ u32 msRamAreaArrayEntrySize __attribute((packed));
+
+ u32 numaDimmExists __attribute((packed));
+ u32 numaDimmFunctional __attribute((packed));
+ void * numaDimmArray __attribute((packed));
+ u32 numaDimmArrayNumEntries __attribute((packed));
+ u32 numaDimmArrayEntrySize __attribute((packed));
+
+ struct IoHriMainStoreVpdIdData idData __attribute((packed));
+
+ u64 powerData __attribute((packed));
+ u64 cardAssemblyPartNum __attribute((packed));
+ u64 chipSerialNum __attribute((packed));
+
+ u64 reserved3 __attribute((packed));
+ char reserved4[16] __attribute((packed));
+
+ struct IoHriMainStoreVpdFruData fruData __attribute((packed));
+
+ u8 vpdPortNum __attribute((packed));
+ u8 reserved5 __attribute((packed));
+ u8 frameId __attribute((packed));
+ u8 rackUnit __attribute((packed));
+ char asciiKeywordVpd[256] __attribute((packed));
+ u32 reserved6 __attribute((packed));
+};
+
+
+// Top-level Power4 main store VPD segment: totals plus a pointer to an
+// array of IoHriMainStoreArea4 entries with its element count and size.
+struct IoHriMainStoreSegment5 {
+ u16 reserved1;
+ u8 reserved2;
+ u8 msVpdFormat;
+
+ u32 totalMainStore;
+ u64 maxConfiguredMsAdr;
+
+ struct IoHriMainStoreArea4* msAreaArray;
+ u32 msAreaArrayNumEntries;
+ u32 msAreaArrayEntrySize;
+
+ u32 msAreaExists;
+ u32 msAreaFunctional;
+
+ u64 reserved3;
+};
+
+
+
+#endif // _IOHRIMAINSTORE_H
+
--- /dev/null
+/*
+ * IoHriProcessorVpd.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//===================================================================
+//
+// This struct maps Processor Vpd that is DMAd to SLIC by CSP
+//
+
+#ifndef _TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _IOHRIPROCESSORVPD_H
+#define _IOHRIPROCESSORVPD_H
+
+// Per-processor VPD record DMAd to the OS by the service processor (CSP).
+// The record is 0x100 bytes (offsets x00-xFF in the field comments) and
+// the layout is a hardware contract; do not reorder or resize fields.
+struct IoHriProcessorVpd
+{
+
+ u8 xFormat; // VPD format indicator x00-x00
+ u8 xProcStatus:8; // Processor State x01-x01 (full-width bitfield, one byte)
+ u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02
+ u8 xSrcType:1; // Src Type x03-x03
+ u8 xSrcSoft:1; // Src stay soft ...
+ u8 xSrcParable:1; // Src parable ...
+ u8 xRsvd1:5; // Reserved ...
+ u16 xHvPhysicalProcIndex; // Hypervisor physical proc index04-x05
+ u16 xRsvd2; // Reserved x06-x07
+ u32 xHwNodeId; // Hardware node id x08-x0B
+ u32 xHwProcId; // Hardware processor id x0C-x0F
+
+ u32 xTypeNum; // Card Type/CCIN number x10-x13
+ u32 xModelNum; // Model/Feature number x14-x17
+ u64 xSerialNum; // Serial number x18-x1F
+ char xPartNum[12]; // Book Part or FPU number x20-x2B
+ char xMfgID[4]; // Manufacturing ID x2C-x2F
+
+ u32 xProcFreq; // Processor Frequency x30-x33
+ u32 xTimeBaseFreq; // Time Base Frequency x34-x37
+
+ u32 xChipEcLevel; // Chip EC Levels x38-x3B
+ u32 xProcIdReg; // PIR SPR value x3C-x3F
+ u32 xPVR; // PVR value x40-x43
+ u8 xRsvd3[12]; // Reserved x44-x4F
+
+ u32 xInstCacheSize; // Instruction cache size in KB x50-x53
+ u32 xInstBlockSize; // Instruction cache block size x54-x57
+ u32 xDataCacheOperandSize; // Data cache operand size x58-x5B
+ u32 xInstCacheOperandSize; // Inst cache operand size x5C-x5F
+
+ u32 xDataL1CacheSizeKB; // L1 data cache size in KB x60-x63
+ u32 xDataL1CacheLineSize; // L1 data cache block size x64-x67
+ u64 xRsvd4; // Reserved x68-x6F
+
+ u32 xDataL2CacheSizeKB; // L2 data cache size in KB x70-x73
+ u32 xDataL2CacheLineSize; // L2 data cache block size x74-x77
+ u64 xRsvd5; // Reserved x78-x7F
+
+ u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83
+ u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87
+ u64 xRsvd6; // Reserved x88-x8F
+
+ u64 xFruLabel; // Card Location Label x90-x97
+ u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98
+ u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99
+ u16 xSlotMapIndex; // Index in slot map table x9A-x9B
+ u8 xSmartCardPortNo; // Smart card port number x9C-x9C
+ u8 xRsvd7; // Reserved x9D-x9D
+ u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F
+
+ u8 xRsvd8[24]; // Reserved xA0-xB7
+
+ char xProcSrc[72]; // CSP format SRC xB8-xFF
+};
+#endif // _IOHRIPROCESSORVPD_H
--- /dev/null
+/*
+ * ItIplParmsReal.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//==============================================================================
+//
+// This struct maps the IPL Parameters DMA'd from the SP.
+//
+// Warning:
+// This data must map in exactly 64 bytes and match the architecture for
+// the IPL parms
+//
+//=============================================================================
+
+
+//-------------------------------------------------------------------
+// Standard Includes
+//-------------------------------------------------------------------
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _ITIPLPARMSREAL_H
+#define _ITIPLPARMSREAL_H
+
+// IPL parameters DMA'd from the service processor.  Per the header
+// comment above, this must map exactly 64 bytes (offsets x00-x3F) and
+// match the IPL parms architecture; do not reorder or resize fields.
+struct ItIplParmsReal
+{
+ u8 xFormat; // Defines format of IplParms x00-x00
+ u8 xRsvd01:6; // Reserved x01-x01
+ u8 xAlternateSearch:1; // Alternate search indicator ...
+ u8 xUaSupplied:1; // UA Supplied on programmed IPL ...
+ u8 xLsUaFormat; // Format byte for UA x02-x02
+ u8 xRsvd02; // Reserved x03-x03
+ u32 xLsUa; // LS UA x04-x07
+ u32 xUnusedLsLid; // First OS LID to load x08-x0B
+ u16 xLsBusNumber; // LS Bus Number x0C-x0D
+ u8 xLsCardAdr; // LS Card Address x0E-x0E
+ u8 xLsBoardAdr; // LS Board Address x0F-x0F
+ u32 xRsvd03; // Reserved x10-x13
+ u8 xSpcnPresent:1; // SPCN present x14-x14
+ u8 xCpmPresent:1; // CPM present ...
+ u8 xRsvd04:6; // Reserved ...
+ u8 xRsvd05:4; // Reserved x15-x15
+ u8 xKeyLock:4; // Keylock setting ...
+ u8 xRsvd06:6; // Reserved x16-x16
+ u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
+ u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
+ u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiated x18-x19
+ u16 xPowerOnResetIpl:1; // Indicate POR condition ...
+ u16 xMainStorePreserved:1; // Main Storage is preserved ...
+ u16 xRsvd07:13; // Reserved ...
+ u16 xIplSource:16; // Ipl source (full-width bitfield) x1A-x1B
+ u8 xIplReason:8; // Reason for this IPL (full-width bitfield) x1C-x1C
+ u8 xRsvd08; // Reserved x1D-x1D
+ u16 xRsvd09; // Reserved x1E-x1F
+ u16 xSysBoxType; // System Box Type x20-x21
+ u16 xSysProcType; // System Processor Type x22-x23
+ u32 xRsvd10; // Reserved x24-x27
+ u64 xRsvd11; // Reserved x28-x2F
+ u64 xRsvd12; // Reserved x30-x37
+ u64 xRsvd13; // Reserved x38-x3F
+};
+#endif // _ITIPLPARMSREAL_H
--- /dev/null
+/*
+ * ItLpNaca.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=============================================================================
+//
+// This control block contains the data that is shared between the
+// hypervisor (PLIC) and the OS.
+//
+//=============================================================================
+
+
+#ifndef _ITLPNACA_H
+#define _ITLPNACA_H
+
+struct ItLpNaca
+{
+//=============================================================================
+// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
+//=============================================================================
+ u32 xDesc; // Eye catcher x00-x03
+ u16 xSize; // Size of this class x04-x05
+ u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
+ u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
+ u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
+ u8 xServiceLpIndex; // LP Ind of Service Focal Pointx0A-x0A
+ u8 xLpIndex; // LP Index x0B-x0B
+ u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
+ u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
+ u8 xPirEnvironMode:8; // Piranha or hardware x10-x10
+ u8 xPirConsoleMode:8; // Piranha console indicator x11-x11
+ u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12
+ u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
+ u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F
+ u8 xSysPartitioned:1; // Is the system partitioned ...
+ u8 xHwSyncedTBs:1; // Hardware synced TBs ...
+ u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ...
+ u8 xRsvd1_1:4; // Reserved ...
+ u8 xSpVpdFormat:8; // VPD areas are in CSP format ...
+ u8 xIntProcRatio:8; // Ratio of int procs to procs ...
+ u8 xRsvd1_2[5]; // Reserved ...
+ u16 xRsvd1_3; // Reserved x20-x21
+ u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
+ u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
+ u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
+ u64 xLoadAreaAddr; // ER address of load area x28-x2F
+ u32 xLoadAreaChunks; // Chunks for the load area x30-x33
+ u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
+ // doing an ASR switch on PASE
+ // system call.
+ u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
+ u8 xRsvd1_4[64]; // x40-x7F
+
+//=============================================================================
+// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
+//=============================================================================
+ u8 xRsvd2_0[128]; // Reserved x00-x7F
+
+//=============================================================================
+// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
+// NB: Padding required to keep xInterruptHdlr at x300 which is required
+// for v4r4 PLIC.
+//=============================================================================
+ u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
+ u8 xRsvd3_0[384]; // Reserved 180-2FF
+//=============================================================================
+// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
+// handlers
+//=============================================================================
+ u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
+};
+
+//=============================================================================
+
+#endif // _ITLPNACA_H
--- /dev/null
+/*
+ * ItLpPaca.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=============================================================================
+//
+// This control block contains the data that is shared between the
+// hypervisor (PLIC) and the OS.
+//
+//
+//----------------------------------------------------------------------------
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _ITLPPACA_H
+#define _ITLPPACA_H
+
+
+struct ItLpPaca
+{
+//=============================================================================
+// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
+// NOTE: The xDynXyz fields are fields that will be dynamically changed by
+// PLIC when preparing to bring a processor online or when dispatching a
+// virtual processor!
+//=============================================================================
+ u32 xDesc; // Eye catcher 0xD397D781 x00-x03
+ u16 xSize; // Size of this struct x04-x05
+ u16 xRsvd1_0; // Reserved x06-x07
+ u16 xRsvd1_1:14; // Reserved x08-x09
+ u8 xSharedProc:1; // Shared processor indicator ...
+ u8 xSecondaryThread:1; // Secondary thread indicator ...
+ volatile u8 xDynProcStatus:8; // Dynamic Status of this proc x0A-x0A
+ u8 xSecondaryThreadCnt; // Secondary thread count x0B-x0B
+ volatile u16 xDynHvPhysicalProcIndex;// Dynamic HV Physical Proc Index0C-x0D
+ volatile u16 xDynHvLogicalProcIndex;// Dynamic HV Logical Proc Indexx0E-x0F
+ u32 xDecrVal; // Value for Decr programming x10-x13
+ u32 xPMCVal; // Value for PMC regs x14-x17
+ volatile u32 xDynHwNodeId; // Dynamic Hardware Node id x18-x1B
+ volatile u32 xDynHwProcId; // Dynamic Hardware Proc Id x1C-x1F
+ volatile u32 xDynPIR; // Dynamic ProcIdReg value x20-x23
+ u32 xDseiData; // DSEI data x24-x27
+ u64 xSPRG3; // SPRG3 value x28-x2F
+ u8 xRsvd1_3[80]; // Reserved x30-x7F
+
+//=============================================================================
+// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
+//=============================================================================
+ // This Dword contains a byte for each type of interrupt that can occur.
+ // The IPI is a count while the others are just a binary 1 or 0.
+ union {
+ u64 xAnyInt;
+ struct {
+ u16 xRsvd; // Reserved - cleared by #mpasmbl
+ u8 xXirrInt; // Indicates xXirrValue is valid or Immed IO
+ u8 xIpiCnt; // IPI Count
+ u8 xDecrInt; // DECR interrupt occurred
+ u8 xPdcInt; // PDC interrupt occurred
+ u8 xQuantumInt; // Interrupt quantum reached
+ u8 xOldPlicDeferredExtInt; // Old PLIC has a deferred XIRR pending
+ } xFields;
+ } xIntDword;
+
+ // Whenever any fields in this Dword are set then PLIC will defer the
+ // processing of external interrupts. Note that PLIC will store the
+ // XIRR directly into the xXirrValue field so that another XIRR will
+ // not be presented until this one clears. The layout of the low
+ // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the
+ // entire Dword is zero or not. A non-zero value in the low order
+ // 2-bytes will result in SLIC being granted the highest thread
+ // priority upon return. A 0 will return to SLIC as medium priority.
+ u64 xPlicDeferIntsArea; // Entire Dword
+
+ // Used to pass the real SRR0/1 from PLIC to SLIC as well as to
+ // pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid.
+ u64 xSavedSrr0; // Saved SRR0 x10-x17
+ u64 xSavedSrr1; // Saved SRR1 x18-x1F
+
+ // Used to pass parms from the OS to PLIC for SetAsrAndRfid
+ u64 xSavedGpr3; // Saved GPR3 x20-x27
+ u64 xSavedGpr4; // Saved GPR4 x28-x2F
+ u64 xSavedGpr5; // Saved GPR5 x30-x37
+
+ u8 xRsvd2_1; // Reserved x38-x38
+ u8 xCpuCtlsTaskAttributes; // Task attributes for cpuctls x39-x39
+ u8 xFPRegsInUse; // FP regs in use x3A-x3A
+ u8 xPMCRegsInUse; // PMC regs in use x3B-x3B
+ volatile u32 xSavedDecr; // Saved Decr Value x3C-x3F
+ volatile u64 xEmulatedTimeBase;// Emulated TB for this thread x40-x47
+ volatile u64 xCurPLICLatency; // Unaccounted PLIC latency x48-x4F
+ u64 xTotPLICLatency; // Accumulated PLIC latency x50-x57
+ u64 xWaitStateCycles; // Wait cycles for this proc x58-x5F
+ u64 xEndOfQuantum; // TB at end of quantum x60-x67
+ u64 xPDCSavedSPRG1; // Saved SPRG1 for PMC int x68-x6F
+ u64 xPDCSavedSRR0; // Saved SRR0 for PMC int x70-x77
+ volatile u32 xVirtualDecr; // Virtual DECR for shared procsx78-x7B
+ u32 xRsvd2_2; // Reserved x7C-x7F
+
+//=============================================================================
+// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
+//=============================================================================
+ // This is the xYieldCount. An "odd" value (low bit on) means that
+ // the processor is yielded (either because of an OS yield or a PLIC
+ // preempt). An even value implies that the processor is currently
+ // executing.
+ // NOTE: This value will ALWAYS be zero for dedicated processors and
+ // will NEVER be zero for shared processors (ie, initialized to a 1).
+ volatile u32 xYieldCount; // PLIC increments each dispatchx00-x03
+ u8 xRsvd3_0[124]; // Reserved x04-x7F
+
+//=============================================================================
+// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
+//=============================================================================
+ u8 xPmcSaveArea[256]; // PMC interrupt Area x00-xFF
+
+
+};
+#endif // _ITLPPACA_H
--- /dev/null
+/*
+ * ItLpQueue.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=============================================================================
+//
+// This control block defines the simple LP queue structure that is
+// shared between the hypervisor (PLIC) and the OS in order to send
+// events to an LP.
+//
+
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+#include <asm/ptrace.h>
+
+
+struct HvLpEvent;
+
+
+#ifndef _ITLPQUEUE_H
+#define _ITLPQUEUE_H
+
+#define ITMaxLpQueues 8
+
+// Values for the xPlicStatus byte of struct ItLpQueue (see below):
+#define NotUsed 0 // Queue will not be used by PLIC
+#define DedicatedIo 1 // Queue dedicated to IO processor specified
+#define DedicatedLp 2 // Queue dedicated to LP specified
+#define Shared 3 // Queue shared for both IO and LP
+
+// Event stack geometry:
+#define LpEventStackSize 4096
+#define LpEventMaxSize 256
+#define LpEventAlign 64
+
+struct ItLpQueue
+{
+//
+// The xSlicCurEventPtr is the pointer to the next event stack entry that will
+// become valid. The OS must peek at this entry to determine if it is valid.
+// PLIC will set the valid indicator as the very last store into that entry.
+//
+// When the OS has completed processing of the event then it will mark the event
+// as invalid so that PLIC knows it can store into that event location again.
+//
+// If the event stack fills and there are overflow events, then PLIC will set
+// the xPlicOverflowIntPending flag in which case the OS will have to fetch the
+// additional LP events once they have drained the event stack.
+//
+// The first 16-bytes are known by both the OS and PLIC. The remainder of the
+// cache line is for use by the OS.
+//
+//=============================================================================
+ u8 xPlicOverflowIntPending;// 0x00 Overflow events are pending
+ u8 xPlicStatus; // 0x01 DedicatedIo or DedicatedLp or NotUsed
+ u16 xSlicLogicalProcIndex; // 0x02 Logical Proc Index for correlation
+ u8 xPlicRsvd[12]; // 0x04
+ char* xSlicCurEventPtr; // 0x10
+ char* xSlicLastValidEventPtr; // 0x18
+ char* xSlicEventStackPtr; // 0x20
+ u8 xIndex; // 0x28 Unique sequential index
+ u8 xSlicRsvd[3]; // 0x29-0x2B
+ u32 xInUseWord; // 0x2C
+ u64 xLpIntCount; // 0x30 Total Lp Int msgs processed
+ u64 xLpIntCountByType[9]; // 0x38-0x7F Event counts by type
+};
+
+// The LP event queue instance declared for this partition.
+extern struct ItLpQueue xItLpQueue;
+
+// Queue operations; implementations are not visible in this header.
+extern struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * );
+extern int ItLpQueue_isLpIntPending( struct ItLpQueue * );
+extern unsigned ItLpQueue_process( struct ItLpQueue *, struct pt_regs * );
+extern void ItLpQueue_clearValid( struct HvLpEvent * );
+
+static __inline__ void process_iSeries_events( void )
+{
+ // Loads r0 with 0x5555 and executes "sc"; r0 and r3 are listed as
+ // clobbered. NOTE(review): presumably a hypervisor call that drains
+ // pending LP events (per the function name) — confirm against the
+ // PLIC/SLIC call interface.
+ __asm__ __volatile__ (
+ " li 0,0x5555 \n\
+ sc"
+ : : : "r0", "r3" );
+}
+
+
+//=============================================================================
+#endif // _ITLPQUEUE_H
--- /dev/null
+/*
+ * ItLpRegSave.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=====================================================================================
+//
+// This control block contains the data that is shared between PLIC
+// and the OS
+//
+//
+
+#ifndef _ITLPREGSAVE_H
+#define _ITLPREGSAVE_H
+
+struct ItLpRegSave
+{
+ u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
+ u16 xSize; // Size of this class 004-005
+ u8 xInUse; // Area is live 006-006
+ u8 xRsvd1[9]; // Reserved 007-00F
+
+ u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
+ u32 xCTRL; // Control Register 170-173
+ u32 xDEC; // Decrementer 174-177
+ u32 xFPSCR; // FP Status and Control Reg 178-17B
+ u32 xPVR; // Processor Version Number 17C-17F
+
+ u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
+ u32 xPMC1; // Perf Monitor Counter 1 188-18B
+ u32 xPMC2; // Perf Monitor Counter 2 18C-18F
+ u32 xPMC3; // Perf Monitor Counter 3 190-193
+ u32 xPMC4; // Perf Monitor Counter 4 194-197
+ u32 xPIR; // Processor ID Reg 198-19B
+
+ u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
+ u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
+ u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
+ u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
+ u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
+ u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
+ u32 xTSC; // Thread Switch Control 1B4-1B7
+ u32 xTST; // Thread Switch Timeout 1B8-1BB
+ u32 xRsvd; // Reserved 1BC-1BF
+
+ u64 xACCR; // Address Compare Control Reg 1C0-1C7
+ u64 xIMR; // Instruction Match Register 1C8-1CF
+ u64 xSDR1; // Storage Description Reg 1 1D0-1D7
+ u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
+ u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
+ u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
+ u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
+ u64 xTB; // Time Base Register 1F8-1FF
+
+ u64 xFPR[32]; // Floating Point Registers 200-2FF
+
+ u64 xMSR; // Machine State Register 300-307
+ u64 xNIA; // Next Instruction Address 308-30F
+
+ u64 xDABR; // Data Address Breakpoint Reg 310-317
+ u64 xIABR; // Inst Address Breakpoint Reg 318-31F
+
+ u64 xHID0; // HW Implementation Dependent0 320-327
+
+ u64 xHID4; // HW Implementation Dependent4 328-32F
+ u64 xSCOMd; // SCOM Data Reg (SPRG4) 330-337
+ u64 xSCOMc; // SCOM Command Reg (SPRG5) 338-33F
+ u64 xSDAR; // Sample Data Address Register 340-347
+ u64 xSIAR; // Sample Inst Address Register 348-34F
+
+ u8 xRsvd3[176]; // Reserved 350-3FF
+};
+#endif // _ITLPREGSAVE_H
--- /dev/null
+/*
+ * ItSpCommArea.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#ifndef _ITSPCOMMAREA_H
+#define _ITSPCOMMAREA_H
+
+
+struct SpCommArea
+{
+ // 128 bytes total: 16-byte header, four u64 time values, 80 reserved.
+ u32 xDesc; // Descriptor (only in new formats) 000-003
+ u8 xFormat; // Format (only in new formats) 004-004
+ u8 xRsvd1[11]; // Reserved 005-00F
+ u64 xRawTbAtIplStart; // Raw HW TB value when IPL is started 010-017
+ u64 xRawTodAtIplStart; // Raw HW TOD value when IPL is started 018-01F
+ u64 xBcdTimeAtIplStart; // BCD time when IPL is started 020-027
+ u64 xBcdTimeAtOsStart; // BCD time when OS passed control 028-02F
+ u8 xRsvd2[80]; // Reserved 030-07F
+};
+
+extern struct SpCommArea xSpCommArea;
+
+#endif /* _ITSPCOMMAREA_H */
--- /dev/null
+/*
+ * ItVpdAreas.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+//=====================================================================================
+//
+// This file defines the address and length of all of the VPD area passed to
+// the OS from PLIC (most of which start from the SP).
+//
+
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+
+
+#ifndef _ITVPDAREAS_H
+#define _ITVPDAREAS_H
+
+// VPD Entry index is carved in stone - cannot be changed (easily).
+#define ItVpdCecVpd 0
+#define ItVpdDynamicSpace 1
+#define ItVpdExtVpd 2
+#define ItVpdExtVpdOnPanel 3
+#define ItVpdFirstPaca 4
+#define ItVpdIoVpd 5
+#define ItVpdIplParms 6
+#define ItVpdMsVpd 7
+#define ItVpdPanelVpd 8
+#define ItVpdLpNaca 9
+#define ItVpdBackplaneAndMaybeClockCardVpd 10
+#define ItVpdRecoveryLogBuffer 11
+#define ItVpdSpCommArea 12
+#define ItVpdSpLogBuffer 13
+#define ItVpdSpLogBufferSave 14
+#define ItVpdSpCardVpd 15
+#define ItVpdFirstProcVpd 16
+#define ItVpdApModelVpd 17
+#define ItVpdClockCardVpd 18
+#define ItVpdBusExtCardVpd 19
+#define ItVpdProcCapacityVpd 20
+#define ItVpdInteractiveCapacityVpd 21
+#define ItVpdFirstSlotLabel 22
+#define ItVpdFirstLpQueue 23
+#define ItVpdFirstL3CacheVpd 24
+#define ItVpdFirstProcFruVpd 25
+
+#define ItVpdMaxEntries 26 // == count of index values above (0..25)
+
+
+#define ItDmaMaxEntries 10 // sizes the DMA arrays in ItVpdAreas below
+
+#define ItVpdAreasMaxSlotLabels 192
+
+
+struct SlicVpdAdrs {
+ u32 pad1; // padding — presumably keeps vpdAddr aligned; TODO confirm
+ void * vpdAddr; // address of a VPD buffer
+};
+
+
+struct ItVpdAreas
+{
+ u32 xSlicDesc; // Descriptor 000-003
+ u16 xSlicSize; // Size of this control block 004-005
+ u16 xPlicAdjustVpdLens:1; // Flag to indicate new interface 006-007
+ u16 xRsvd1:15; // Reserved bits ...
+ u16 xSlicVpdEntries; // Number of VPD entries 008-009
+ u16 xSlicDmaEntries; // Number of DMA entries 00A-00B
+ u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D
+ u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F
+ u16 xSlicDmaToksOffset; // Offset into this of array 010-011
+ u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013
+ u16 xSlicDmaLensOffset; // Offset into this of array 014-015
+ u16 xSlicVpdLensOffset; // Offset into this of array 016-017
+ u16 xSlicMaxSlotLabels; // Maximum number of slot labels 018-019
+ u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B
+ u8 xRsvd2[4]; // Reserved 01C-01F
+ u64 xRsvd3[12]; // Reserved 020-07F
+ u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7
+ u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF
+ u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F
+ // NOTE(review): 26 u32 entries occupy 0D0-137; the 0D0-12F range above
+ // fits only 24 entries and looks stale — verify offsets below as well.
+ void * xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
+};
+
+#endif // _ITVPDAREAS_H
--- /dev/null
+/*
+ * LparData.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _LPARDATA_H
+#define _LPARDATA_H
+
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/abs_addr.h>
+
+#include <asm/Naca.h>
+#include <asm/iSeries/ItLpNaca.h>
+#include <asm/iSeries/ItLpPaca.h>
+#include <asm/iSeries/ItLpRegSave.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/HvReleaseData.h>
+#include <asm/iSeries/LparMap.h>
+#include <asm/iSeries/ItVpdAreas.h>
+#include <asm/iSeries/ItIplParmsReal.h>
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/IoHriProcessorVpd.h>
+#include <asm/page.h>
+
+extern struct LparMap xLparMap;
+extern struct Naca xNaca;
+extern struct Paca xPaca[];
+extern struct HvReleaseData hvReleaseData;
+extern struct ItLpNaca itLpNaca;
+extern struct ItIplParmsReal xItIplParmsReal;
+extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
+extern struct ItLpQueue xItLpQueue;
+extern struct ItVpdAreas itVpdAreas;
+extern u64 xMsVpd[];
+extern struct msChunks msChunks;
+
+
+#endif /* _LPARDATA_H */
--- /dev/null
+/*
+ * LparMap.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PPC_TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _LPARMAP_H
+#define _LPARMAP_H
+
+/* The iSeries hypervisor will set up mapping for one or more
+ * ESID/VSID pairs (in SLB/segment registers) and will set up
+ * mappings of one or more ranges of pages to VAs.
+ * We will have the hypervisor set up the ESID->VSID mapping
+ * for the four kernel segments (C-F). With shared processors,
+ * the hypervisor will clear all segment registers and reload
+ * these four whenever the processor is switched from one
+ * partition to another.
+ */
+
+/* The Vsid and Esid identified below will be used by the hypervisor
+ * to set up a memory mapping for part of the load area before giving
+ * control to the Linux kernel. The load area is 64 MB, but this must
+ * not attempt to map the whole load area. The Hashed Page Table may
+ * need to be located within the load area (if the total partition size
+ * is 64 MB), but cannot be mapped. Typically, this should specify
+ * to map half (32 MB) of the load area.
+ *
+ * The hypervisor will set up page table entries for the number of
+ * pages specified.
+ *
+ * In 32-bit mode, the hypervisor will load all four of the
+ * segment registers (identified by the low-order four bits of the
+ * Esid field. In 64-bit mode, the hypervisor will load one SLB
+ * entry to map the Esid to the Vsid.
+*/
+
+// Hypervisor initially maps 32MB of the load area
+#define HvPagesToMap 8192
+
+struct LparMap
+{
+ // Parenthesized values are the expected initial settings described in
+ // the comments above this struct.
+ u64 xNumberEsids; // Number of ESID/VSID pairs (1)
+ u64 xNumberRanges; // Number of VA ranges to map (1)
+ u64 xSegmentTableOffs; // Page number within load area of seg table (0)
+ u64 xRsvd[5]; // Reserved (0)
+ u64 xKernelEsid; // Esid used to map kernel load (0x0C00000000)
+ u64 xKernelVsid; // Vsid used to map kernel load (0x0C00000000)
+ u64 xPages; // Number of pages to be mapped (8192)
+ u64 xOffset; // Offset from start of load area (0)
+ u64 xVPN; // Virtual Page Number (0x000C000000000000)
+};
+
+#endif /* _LPARMAP_H */
--- /dev/null
+
+#ifndef __XMPCILPEVENT_H__
+#define __XMPCILPEVENT_H__
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Prototypes only; implementations are not visible in this header.
+// NOTE(review): meaning of XmPciLpEvent_init's int result is not shown
+// here — confirm at the call site.
+int XmPciLpEvent_init(void);
+void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __XMPCILPEVENT_H__ */
--- /dev/null
+#ifndef _ISERIES_VPDINFO_H
+#define _ISERIES_VPDINFO_H
+/************************************************************************/
+/* File iSeries_VpdInfo.h created by Allan Trautman Feb 08 2001. */
+/************************************************************************/
+/* This code supports the location data found on the IBM iSeries systems. */
+/* Copyright (C) 20yy <Allan H Trautman> <IBM Corp> */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created, Feb 8, 2001 */
+/* Reformatted for Card, March 8, 2001 */
+/* End Change Activity */
+/************************************************************************/
+
+struct pci_dev; /* Forward Declare */
+/************************************************************************/
+/* Location Data extracted from the VPD list and device info. */
+/************************************************************************/
+struct LocationDataStruct { /* Location data structure for device */
+ u16 Bus; /* iSeries Bus Number 0x00*/
+ u16 Board; /* iSeries Board 0x02*/
+ u8 FrameId; /* iSeries spcn Frame Id 0x04*/
+ u8 PhbId; /* iSeries Phb Location 0x05*/
+ u16 Card; /* iSeries Card Slot 0x06*/
+ char CardLocation[4]; /* Char format of planar vpd 0x08*/
+ u8 AgentId; /* iSeries AgentId 0x0C*/
+ u8 SecondaryAgentId; /* iSeries Secondary Agent Id 0x0D*/
+ u8 LinuxBus; /* Linux Bus Number 0x0E*/
+ u8 LinuxDevFn; /* Linux Device Function 0x0F*/
+};
+typedef struct LocationDataStruct LocationData;
+#define LOCATION_DATA_SIZE 16 /* == total of the 16-byte layout above */
+
+/************************************************************************/
+/* Prototypes */
+/************************************************************************/
+extern LocationData* iSeries_GetLocationData(struct pci_dev* PciDev);
+extern int iSeries_Device_Information(struct pci_dev*,char*, int);
+
+#endif /* _ISERIES_VPDINFO_H */
--- /dev/null
+/*
+ * iSeries_dma.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ISERIES_DMA_H
+#define _ISERIES_DMA_H
+
+#include <asm/types.h>
+#ifndef __LINUX_SPINLOCK_H
+#include <linux/spinlock.h>
+#endif
+
+// NUM_TCE_LEVELS defines the largest contiguous block
+// of dma (tce) space we can get. NUM_TCE_LEVELS = 10
+// allows up to 2**9 pages (512 * 4096) = 2 MB
+#define NUM_TCE_LEVELS 10
+
+#define NO_TCE ((dma_addr_t)-1) /* "no mapping" sentinel value */
+
+// Tces come in two formats, one for the virtual bus and a different
+// format for PCI
+#define TCE_VB 0
+#define TCE_PCI 1
+
+
+union Tce {
+ u64 wholeTce; // entire TCE as one 64-bit value (bit fields sum to 64)
+ struct {
+ u64 cacheBits :6; /* Cache hash bits - not used */
+ u64 rsvd :6;
+ u64 rpn :40; /* Absolute page number */
+ u64 valid :1; /* Tce is valid (vb only) */
+ u64 allIo :1; /* Tce is valid for all lps (vb only) */
+ u64 lpIndex :8; /* LpIndex for user of TCE (vb only) */
+ u64 pciWrite :1; /* Write allowed (pci only) */
+ u64 readWrite :1; /* Read allowed (pci), Write allowed
+ (vb) */
+ } tceBits;
+};
+
+struct Bitmap {
+ unsigned long numBits;
+ unsigned long numBytes;
+ unsigned char * map;
+};
+
+struct MultiLevelBitmap {
+ unsigned long maxLevel;
+ struct Bitmap level[NUM_TCE_LEVELS];
+};
+
+struct TceTable {
+ u64 busNumber;
+ u64 size;
+ u64 startOffset;
+ u64 index;
+ spinlock_t lock;
+ struct MultiLevelBitmap mlbm;
+};
+
+struct HvTceTableManagerCB {
+ u64 busNumber; /* Bus number for this tce table */
+ u64 start; /* Will be NULL for secondary */
+ u64 totalSize; /* Size (in pages) of whole table */
+ u64 startOffset; /* Index into real tce table of the
+ start of our section */
+ u64 size; /* Size (in pages) of our section */
+ u64 index; /* Index of this tce table (token?) */
+ u16 maxTceTableIndex; /* Max number of tables for partition */
+ u8 virtualBusFlag; /* Flag to indicate virtual bus */
+ u8 rsvd[5];
+};
+
+extern struct TceTable virtBusTceTable; /* Tce table for virtual bus */
+
+extern struct TceTable * build_tce_table( struct HvTceTableManagerCB *,
+ struct TceTable *);
+extern void create_virtual_bus_tce_table( void );
+
+extern void create_pci_bus_tce_table( unsigned busNumber );
+
+#endif // _ISERIES_DMA_H
--- /dev/null
+
+#ifndef __ISERIES_FIXUP_H__
+#define __ISERIES_FIXUP_H__
+#include <linux/pci.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void iSeries_fixup (void);
+void iSeries_fixup_bus (struct pci_bus*);
+unsigned int iSeries_scan_slot (struct pci_dev*, u16, u8, u8);
+
+
+/* Need to store information related to the PHB bus and make it accessible to the hose */
+struct iSeries_hose_arch_data {
+ u32 hvBusNumber;
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ISERIES_FIXUP_H__ */
--- /dev/null
+#include <linux/config.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#ifndef _ISERIES_IO_H
+#define _ISERIES_IO_H
+#include <linux/types.h>
+/************************************************************************/
+/* File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. */
+/************************************************************************/
+/* Remaps the io.h for the iSeries Io */
+/* Copyright (C) 20yy Allan H Trautman, IBM Corporation */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created December 28, 2000 */
+/* End Change Activity */
+/************************************************************************/
+extern u8 iSeries_Read_Byte(void* IoAddress);
+extern u16 iSeries_Read_Word(void* IoAddress);
+extern u32 iSeries_Read_Long(void* IoAddress);
+extern void iSeries_Write_Byte(u8 IoData,void* IoAddress);
+extern void iSeries_Write_Word(u16 IoData,void* IoAddress);
+extern void iSeries_Write_Long(u32 IoData,void* IoAddress);
+
+extern void* iSeries_memset_io(void *dest, char x, size_t n);
+extern void* iSeries_memcpy_toio(void *dest, void *source, size_t n);
+extern void* iSeries_memcpy_fromio(void *dest, void *source, size_t n);
+
+#endif /* _ISERIES_IO_H */
+#endif /* CONFIG_PPC_ISERIES */
+
--- /dev/null
+
+#ifndef __ISERIES_IRQ_H__
+#define __ISERIES_IRQ_H__
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+unsigned int iSeries_startup_IRQ(unsigned int);
+void iSeries_shutdown_IRQ(unsigned int);
+void iSeries_enable_IRQ(unsigned int);
+void iSeries_disable_IRQ(unsigned int);
+void iSeries_end_IRQ(unsigned int);
+void iSeries_init_IRQ(void);
+void iSeries_init_irqMap(int);
+int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
+int iSeries_assign_IRQ(int, HvBusNumber, HvSubBusNumber, HvAgentId);
+void iSeries_activate_IRQs(void);
+
+int XmPciLpEvent_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ISERIES_IRQ_H__ */
--- /dev/null
+#ifndef _ISERIES_64_PCI_H
+#define _ISERIES_64_PCI_H
+/************************************************************************/
+/* File iSeries_pci.h created by Allan Trautman on Tue Feb 20, 2001. */
+/************************************************************************/
+/* Define some useful macros for the iSeries pci routines. */
+/* Copyright (C) 20yy Allan H Trautman, IBM Corporation */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the: */
+/* Free Software Foundation, Inc., */
+/* 59 Temple Place, Suite 330, */
+/* Boston, MA 02111-1307 USA */
+/************************************************************************/
+/* Change Activity: */
+/* Created Feb 20, 2001 */
+/* Added device reset, March 22, 2001 */
+/* Ported to ppc64, May 25, 2001 */
+/* End Change Activity */
+/************************************************************************/
+#include <asm/iSeries/HvCallPci.h>
+
+struct pci_dev; /* For Forward Reference */
+struct iSeries_Device_Node;
+/************************************************************************/
+/* Gets iSeries Bus, SubBus, of DevFn using pci_dev* structure */
+/************************************************************************/
+#define ISERIES_BUS(DevPtr) DevPtr->DsaAddr.busNumber
+#define ISERIES_SUBBUS(DevPtr) DevPtr->DsaAddr.subBusNumber
+#define ISERIES_DEVICE(DevPtr) DevPtr->DsaAddr.deviceId
+#define ISERIES_DEVFUN(DevPtr) DevPtr->DevFn
+#define ISERIES_DSA(DevPtr) (*(u64*)&DevPtr->DsaAddr)
+
+#define EADsMaxAgents 7
+/************************************************************************************/
+/* Decodes Linux DevFn to iSeries DevFn, bridge device, or function. */
+/* For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h */
+/************************************************************************************/
+#define ISERIES_DECODE_DEVFN(linuxdevfn) (((linuxdevfn & 0x71) << 1) | (linuxdevfn & 0x07))
+#define ISERIES_DECODE_DEVICE(linuxdevfn) (((linuxdevfn & 0x38) >> 3) |(((linuxdevfn & 0x40) >> 2) + 0x10))
+#define ISERIES_DECODE_FUNCTION(linuxdevfn) (linuxdevfn & 0x07)
+#define ISERIES_PCI_AGENTID(idsel,func) ((idsel & 0x0F) << 4) | (func & 0x07)
+
+#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7)
+#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
+
+#define ISERIES_ENCODE_DEVICE(agentid) ((0x10) | ((agentid&0x20)>>2) | (agentid&07))
+/************************************************************************************/
+/* Converts Virtual Address to Real Address for Hypervisor calls */
+/************************************************************************************/
+#define REALADDR(virtaddr) (0x8000000000000000 | (virt_to_absolute((u64)virtaddr) ))
+
+/************************************************************************************/
+/* Define TRUE and FALSE Values for Al */
+/************************************************************************************/
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/************************************************************************/
+/* iSeries Device Information */
+/************************************************************************/
+struct iSeries_Device_Node {
+ struct list_head Device_List; /* Must be first for cast to wo*/
+ struct pci_dev* PciDev; /* Pointer to pci_dev structure*/
+ struct HvCallPci_DsaAddr DsaAddr;/* Direct Select Address */
+ /* busNumber,subBusNumber, */
+ /* deviceId, barNumber */
+ HvAgentId AgentId; /* Hypervisor DevFn */
+ int DevFn; /* Linux devfn */
+ int BarOffset;
+ int Irq; /* Assigned IRQ */
+ int ReturnCode; /* Return Code Holder */
+ int IoRetry; /* Current Retry Count */
+ int Flags; /* Possible flags(disable/bist)*/
+ u16 Vendor; /* Vendor ID */
+ struct TceTable* DevTceTable; /* Device TCE Table */
+ u8 PhbId; /* Phb Card is on. */
+ u16 Board; /* Board Number */
+ u8 FrameId; /* iSeries spcn Frame Id */
+ char CardLocation[4];/* Char format of planar vpd */
+ char Location[20]; /* Frame 1, Card C10 */
+};
+/************************************************************************/
+/* Location Data extracted from the VPD list and device info. */
+/************************************************************************/
+struct LocationDataStruct { /* Location data structure for device */
+ u16 Bus; /* iSeries Bus Number 0x00*/
+ u16 Board; /* iSeries Board 0x02*/
+ u8 FrameId; /* iSeries spcn Frame Id 0x04*/
+ u8 PhbId; /* iSeries Phb Location 0x05*/
+ u8 AgentId; /* iSeries AgentId 0x06*/
+ u8 Card;
+ char CardLocation[4];
+};
+typedef struct LocationDataStruct LocationData;
+#define LOCATION_DATA_SIZE 48
+/************************************************************************/
+/* Flight Recorder tracing */
+/************************************************************************/
+extern int iSeries_Set_PciTraceFlag(int TraceFlag);
+extern int iSeries_Get_PciTraceFlag(void);
+
+/************************************************************************/
+/* Functions */
+/************************************************************************/
+extern LocationData* iSeries_GetLocationData(struct pci_dev* PciDev);
+extern int iSeries_Device_Information(struct pci_dev*,char*, int);
+extern void iSeries_Get_Location_Code(struct iSeries_Device_Node*);
+extern int iSeries_Device_ToggleReset(struct pci_dev* PciDev, int AssertTime, int DelayTime);
+
+#endif /* _ISERIES_64_PCI_H */
--- /dev/null
+/*
+ * iSeries_proc.h
+ * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity: */
+/* End Change Activity */
+
+#ifndef _ISERIES_PROC_H
+#define _ISERIES_PROC_H
+
+#include <linux/proc_fs.h>
+
+extern void iSeries_proc_early_init(void);
+extern void iSeries_proc_create(void);
+
+typedef void (*iSeriesProcFunction)(struct proc_dir_entry *iSeries_proc);
+
+extern void iSeries_proc_callback(iSeriesProcFunction initFunction);
+
+#endif /* _ISERIES_PROC_H */
+
--- /dev/null
+/*
+ * mf.h
+ * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
+ *
+ * This modules exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object. The VSP has final authority over powering on/off
+ * all partitions in the iSeries. It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef MF_H_INCLUDED
+#define MF_H_INCLUDED
+
+#include <asm/iSeries/HvTypes.h>
+#include <asm/iSeries/HvLpEvent.h>
+
+struct rtc_time;
+
+typedef void (*MFCompleteHandler)( void * clientToken, int returnCode );
+
+extern void mf_allocateLpEvents( HvLpIndex targetLp,
+ HvLpEvent_Type type,
+ unsigned size,
+ unsigned amount,
+ MFCompleteHandler hdlr,
+ void * userToken );
+
+extern void mf_deallocateLpEvents( HvLpIndex targetLp,
+ HvLpEvent_Type type,
+ unsigned count,
+ MFCompleteHandler hdlr,
+ void * userToken );
+
+extern void mf_powerOff( void );
+
+extern void mf_reboot( void );
+
+extern void mf_displaySrc( u32 word );
+extern void mf_displayProgress( u16 value );
+
+extern void mf_clearSrc( void );
+
+extern void mf_init( void );
+
+extern void mf_setSide(char side);
+
+extern char mf_getSide(void);
+
+extern void mf_setCmdLine(const char *cmdline, int size, u64 side);
+
+extern int mf_getCmdLine(char *cmdline, int *size, u64 side);
+
+extern void mf_getSrcHistory(char *buffer, int size);
+
+extern int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side);
+
+extern int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side);
+
+extern int mf_setRtcTime(unsigned long time);
+
+extern int mf_getRtcTime(unsigned long *time);
+
+extern int mf_getRtc( struct rtc_time * tm );
+
+extern int mf_setRtc( struct rtc_time * tm );
+
+#endif /* MF_H_INCLUDED */
--- /dev/null
+/*
+ * mf_proc.h
+ * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity: */
+/* End Change Activity */
+
+#ifndef _MF_PROC_H
+#define _MF_PROC_H
+
+#include <linux/proc_fs.h>
+
+void mf_proc_init(struct proc_dir_entry *iSeries_proc);
+
+
+#endif /* _MF_PROC_H */
+
--- /dev/null
+/*
+ * veth-proc.h
+ * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity: */
+/* End Change Activity */
+
+#ifndef _VETH_PROC_H
+#define _VETH_PROC_H
+
+#include <linux/proc_fs.h>
+
+void veth_proc_init(struct proc_dir_entry *iSeries_proc);
+
+#endif /* _VETH_PROC_H */
+
--- /dev/null
+/*
+ * linux/include/asm-ppc/ide.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This file contains the ppc64 architecture specific IDE code.
+ */
+
+#ifndef __ASMPPC64_IDE_H
+#define __ASMPPC64_IDE_H
+
+#ifdef __KERNEL__
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS 4
+#endif
+
+#define ide__sti() __sti()
+
+void ppc64_ide_fix_driveid(struct hd_driveid *id);
+#define ide_fix_driveid(id) ppc64_ide_fix_driveid((id))
+
+static __inline__ int ide_default_irq(ide_ioreg_t base) { return 0; }
+static __inline__ ide_ioreg_t ide_default_io_base(int index) { return 0; }
+
+/*
+ * Fill in the hw_regs_t port table for an IDE interface whose task-file
+ * registers start at data_port.  No IRQ is probed here: *irq (if the
+ * caller supplied a pointer) and the IRQ "port" slot are cleared to 0.
+ */
+static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
+{
+	ide_ioreg_t reg = data_port;
+	int i;
+
+	/* Data..status registers sit at consecutive addresses from data_port. */
+	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+		hw->io_ports[i] = reg;
+		reg += 1;
+	}
+	if (ctrl_port) {
+		hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+	} else {
+		/* No explicit control port: use the conventional data + 0x206. */
+		hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206;
+	}
+	if (irq != NULL)
+		*irq = 0;
+	hw->io_ports[IDE_IRQ_OFFSET] = 0;
+}
+
+static __inline__ void ide_init_default_hwifs(void)
+{
+}
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+} select_t;
+
+/* XXX is this correct? - Anton */
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned HOB : 1; /* 48-bit address ordering */
+ unsigned reserved456 : 3;
+ unsigned bit3 : 1; /* ATA-2 thingy */
+ unsigned SRST : 1; /* host soft reset bit */
+ unsigned nIEN : 1; /* device INTRQ to host */
+ unsigned bit0 : 1;
+ } b;
+} control_t;
+
+#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
+#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
+#define ide_check_region(from,extent) check_region((from), (extent))
+#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
+#define ide_release_region(from,extent) release_region((from), (extent))
+
+/*
+ * The following are not needed for the non-m68k ports
+ */
+#define ide_ack_intr(hwif) (1)
+#define ide_release_lock(lock) do {} while (0)
+#define ide_get_lock(lock, hdlr, data) do {} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASMPPC64_IDE_H */
--- /dev/null
+#ifndef _PPC64_INIT_H
+#define _PPC64_INIT_H
+
+#include <linux/init.h>
+
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 90 /* egcs */
+/* DRENG add back in when we get section attribute support */
+#define __chrp __attribute__ ((__section__ (".text.chrp")))
+#define __chrpdata __attribute__ ((__section__ (".data.chrp")))
+#define __chrpfunc(__argchrp) \
+ __argchrp __chrp; \
+ __argchrp
+
+/* this is actually just common chrp/pmac code, not OF code -- Cort */
+#define __openfirmware __attribute__ ((__section__ (".text.openfirmware")))
+#define __openfirmwaredata __attribute__ ((__section__ (".data.openfirmware")))
+#define __openfirmwarefunc(__argopenfirmware) \
+ __argopenfirmware __openfirmware; \
+ __argopenfirmware
+
+#else /* not egcs */
+
+#define __openfirmware
+#define __openfirmwaredata
+#define __openfirmwarefunc(x) x
+
+#endif /* egcs */
+
+#endif /* _PPC64_INIT_H */
--- /dev/null
+#ifndef _PPC64_IO_H
+#define _PPC64_IO_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/page.h>
+#include <asm/byteorder.h>
+#ifdef CONFIG_PPC_ISERIES
+#include <asm/iSeries/iSeries_io.h>
+#endif
+#include <asm/memory.h>
+#include <asm/delay.h>
+
+#define SIO_CONFIG_RA 0x398
+#define SIO_CONFIG_RD 0x399
+
+#define SLOW_DOWN_IO
+/* Define this if you want to see virt_to_* messages */
+#undef __IO_DEBUG
+
+extern unsigned long isa_io_base;
+extern unsigned long isa_mem_base;
+extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+extern int have_print;
+#define _IO_BASE isa_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#define PCI_DRAM_OFFSET pci_dram_offset
+
+#ifdef CONFIG_PPC_ISERIES
+#define readb(addr) iSeries_Read_Byte((void*)(addr))
+#define readw(addr) iSeries_Read_Word((void*)(addr))
+#define readl(addr) iSeries_Read_Long((void*)(addr))
+#define writeb(data, addr) iSeries_Write_Byte(data,((void*)(addr)))
+#define writew(data, addr) iSeries_Write_Word(data,((void*)(addr)))
+#define writel(data, addr) iSeries_Write_Long(data,((void*)(addr)))
+#define memset_io(a,b,c)	iSeries_memset_io((void *)(a),(b),(c)) /* matches iSeries_io.h helper */
+#define memcpy_fromio(a,b,c) iSeries_memcpy_fromio((void *)(a), (void *)(b), (c))
+#define memcpy_toio(a,b,c) iSeries_memcpy_toio((void *)(a), (void *)(b), (c))
+#define inb(addr) readb(((unsigned long)(addr)))
+#define inw(addr) readw(((unsigned long)(addr)))
+#define inl(addr) readl(((unsigned long)(addr)))
+#define outb(data,addr) writeb(data,((unsigned long)(addr)))
+#define outw(data,addr) writew(data,((unsigned long)(addr)))
+#define outl(data,addr) writel(data,((unsigned long)(addr)))
+#else
+#define IS_MAPPED_VADDR(port) ((unsigned long)(port) >> 60UL)
+#ifdef CONFIG_PPC_EEH
+#define readb(addr) eeh_readb((void*)(addr))
+#define readw(addr) eeh_readw((void*)(addr))
+#define readl(addr) eeh_readl((void*)(addr))
+#define writeb(data, addr) eeh_writeb((data), ((void*)(addr)))
+#define writew(data, addr) eeh_writew((data), ((void*)(addr)))
+#define writel(data, addr) eeh_writel((data), ((void*)(addr)))
+#define memset_io(a,b,c) eeh_memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) eeh_memcpy_toio((void *)(a),(b),(c))
+#else
+#define readb(addr) in_8((volatile u8 *)(addr))
+#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
+#define readw(addr) in_le16((volatile u16 *)(addr))
+#define readl(addr) in_le32((volatile u32 *)(addr))
+#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
+#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#endif
+#define inb(port) _inb((unsigned long)port)
+#define outb(val, port) _outb(val, (unsigned long)port)
+#define inw(port) _inw((unsigned long)port)
+#define outw(val, port) _outw(val, (unsigned long)port)
+#define inl(port) _inl((unsigned long)port)
+#define outl(val, port) _outl(val, (unsigned long)port)
+#endif
+
+
+/*
+ * output pause versions need a delay at least for the
+ * w83c105 ide controller in a p610.
+ */
+#define inb_p(port) inb(port)
+#define outb_p(val, port) (udelay(1), outb((val), (port)))
+#define inw_p(port) inw(port)
+#define outw_p(val, port) (udelay(1), outw((val), (port)))
+#define inl_p(port) inl(port)
+#define outl_p(val, port) (udelay(1), outl((val, (port)))
+
+/*
+ * The insw/outsw/insl/outsl macros don't do byte-swapping.
+ * They are only used in practice for transferring buffers which
+ * are arrays of bytes, and byte-swapping is not appropriate in
+ * that case. - paulus */
+#define _IOMAP_VADDR(port) (IS_MAPPED_VADDR(port) ? (port) : (port)+_IO_BASE)
+#define insb(port, buf, ns) _insb((u8 *)(_IOMAP_VADDR(port)), (buf), (ns))
+#define outsb(port, buf, ns) _outsb((u8 *)(_IOMAP_VADDR(port)), (buf), (ns))
+#define insw(port, buf, ns) _insw_ns((u16 *)(_IOMAP_VADDR(port)), (buf), (ns))
+#define outsw(port, buf, ns) _outsw_ns((u16 *)(_IOMAP_VADDR(port)), (buf), (ns))
+#define insl(port, buf, nl) _insl_ns((u32 *)(_IOMAP_VADDR(port)), (buf), (nl))
+#define outsl(port, buf, nl) _outsl_ns((u32 *)(_IOMAP_VADDR(port)), (buf), (nl))
+
+extern void _insb(volatile u8 *port, void *buf, int ns);
+extern void _outsb(volatile u8 *port, const void *buf, int ns);
+extern void _insw(volatile u16 *port, void *buf, int ns);
+extern void _outsw(volatile u16 *port, const void *buf, int ns);
+extern void _insl(volatile u32 *port, void *buf, int nl);
+extern void _outsl(volatile u32 *port, const void *buf, int nl);
+extern void _insw_ns(volatile u16 *port, void *buf, int ns);
+extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
+extern void _insl_ns(volatile u32 *port, void *buf, int nl);
+extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);
+
+/*
+ * The *_ns versions below don't do byte-swapping.
+ * Neither do the standard versions now, these are just here
+ * for older code.
+ */
+#define insw_ns(port, buf, ns) _insw_ns((u16 *)(_IOMAP_VADDR(port)), (buf), (ns))
+#define outsw_ns(port, buf, ns) _outsw_ns((u16 *)(_IOMAP_VADDR(port)), (buf), (ns))
+#define insl_ns(port, buf, nl) _insl_ns((u32 *)(_IOMAP_VADDR(port)), (buf), (nl))
+#define outsl_ns(port, buf, nl) _outsl_ns((u32 *)(_IOMAP_VADDR(port)), (buf), (nl))
+
+
+#define IO_SPACE_LIMIT ~(0UL)
+#define MEM_SPACE_LIMIT ~(0UL)
+
+
+#ifdef __KERNEL__
+/*
+ * Map in an area of physical address space, for accessing
+ * I/O devices etc.
+ */
+extern void *__ioremap(unsigned long address, unsigned long size,
+ unsigned long flags);
+extern void *ioremap(unsigned long address, unsigned long size);
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+extern void iounmap(void *addr);
+
+/*
+ * Change virtual addresses to physical addresses and vv, for
+ * addresses in the area where the kernel has the RAM mapped.
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+#ifdef __IO_DEBUG
+ printk("virt_to_phys: 0x%08lx -> 0x%08lx\n",
+ (unsigned long) address,
+ __pa((unsigned long)address));
+#endif
+ return __pa((unsigned long)address);
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+#ifdef __IO_DEBUG
+ printk("phys_to_virt: 0x%08lx -> 0x%08lx\n", address, __va(address));
+#endif
+ return (void *) __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
+
+#if 0
+#define BIO_VMERGE_BOUNDARY 4096
+#endif
+
+#endif /* __KERNEL__ */
+
+extern inline void iosync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r() eieio()
+#define iobarrier_w() eieio()
+
+/*
+ * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
+ */
+extern inline int in_8(volatile unsigned char *addr)
+{
+ int ret;
+
+ __asm__ __volatile__("eieio; lbz%U1%X1 %0,%1" : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+extern inline void out_8(volatile unsigned char *addr, int val)
+{
+ __asm__ __volatile__("stb%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
+}
+
+extern inline int in_le16(volatile unsigned short *addr)
+{
+ int ret;
+
+ __asm__ __volatile__("eieio; lhbrx %0,0,%1" : "=r" (ret) :
+ "r" (addr), "m" (*addr));
+ return ret;
+}
+
+extern inline int in_be16(volatile unsigned short *addr)
+{
+ int ret;
+
+ __asm__ __volatile__("eieio; lhz%U1%X1 %0,%1" : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+extern inline void out_le16(volatile unsigned short *addr, int val)
+{
+ __asm__ __volatile__("sthbrx %1,0,%2" : "=m" (*addr) :
+ "r" (val), "r" (addr));
+}
+
+extern inline void out_be16(volatile unsigned short *addr, int val)
+{
+ __asm__ __volatile__("sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
+}
+
+extern inline unsigned in_le32(volatile unsigned *addr)
+{
+ unsigned ret;
+
+ __asm__ __volatile__("eieio; lwbrx %0,0,%1" : "=r" (ret) :
+ "r" (addr), "m" (*addr));
+ return ret;
+}
+
+extern inline unsigned in_be32(volatile unsigned *addr)
+{
+ unsigned ret;
+
+ __asm__ __volatile__("eieio; lwz%U1%X1 %0,%1" : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+extern inline void out_le32(volatile unsigned *addr, int val)
+{
+ __asm__ __volatile__("stwbrx %1,0,%2" : "=m" (*addr) :
+ "r" (val), "r" (addr));
+}
+
+extern inline void out_be32(volatile unsigned *addr, int val)
+{
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
+}
+
+#ifdef CONFIG_PPC_EEH
+#include <asm/eeh.h>
+#endif
+
+#ifndef CONFIG_PPC_ISERIES
+/*
+ * PIO accessors for the non-iSeries case.  A port value with any of the
+ * top four address bits set (see IS_MAPPED_VADDR) is treated as an
+ * already-mapped virtual address and accessed via read*/write*;
+ * otherwise the port is an offset from _IO_BASE.  When no I/O base has
+ * been set up, reads return all-ones and writes are dropped.
+ */
+static inline u8 _inb(unsigned long port) {
+	if (IS_MAPPED_VADDR(port))
+		return readb((void *)port);
+	else if (_IO_BASE)
+		return in_8((u8 *)((port)+_IO_BASE));
+	else
+		return 0xff;	/* no I/O base configured */
+}
+static inline void _outb(u8 val, unsigned long port) {
+	if (IS_MAPPED_VADDR(port))
+		return writeb(val, (void *)port);
+	else if (_IO_BASE)
+		out_8((u8 *)((port)+_IO_BASE), val);
+}
+static inline u16 _inw(unsigned long port) {
+	if (IS_MAPPED_VADDR(port))
+		return readw((void *)port);
+	else if (_IO_BASE)
+		return in_le16((u16 *)((port)+_IO_BASE));
+	else
+		return 0xffff;	/* no I/O base configured */
+}
+static inline void _outw(u16 val, unsigned long port) {
+	if (IS_MAPPED_VADDR(port))
+		return writew(val, (void *)port);
+	else if (_IO_BASE)
+		out_le16((u16 *)((port)+_IO_BASE), val);
+}
+static inline u32 _inl(unsigned long port) {
+	if (IS_MAPPED_VADDR(port))
+		return readl((void *)port);
+	else if (_IO_BASE)
+		return in_le32((u32 *)((port)+_IO_BASE));
+	else
+		return 0xffffffff;	/* no I/O base configured */
+}
+static inline void _outl(u32 val, unsigned long port) {
+	if (IS_MAPPED_VADDR(port))
+		return writel(val, (void *)port);
+	else if (_IO_BASE)
+		out_le32((u32 *)((port)+_IO_BASE), val);
+}
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Compare `length` bytes at MMIO address io_addr against `signature`.
+ * Returns 1 on a full match, 0 on any mismatch.  On iSeries the body is
+ * compiled out and the function always returns 0.
+ * NOTE(review): length is not checked for <= 0 before the first readb;
+ * callers are presumed to pass length >= 1 — confirm at call sites.
+ */
+static inline int check_signature(unsigned long io_addr,
+	const unsigned char *signature, int length)
+{
+	int retval = 0;
+#ifndef CONFIG_PPC_ISERIES
+	do {
+		if (readb(io_addr) != *signature)
+			goto out;
+		io_addr++;
+		signature++;
+		length--;
+	} while (length);
+	retval = 1;
+out:
+#endif
+	return retval;
+}
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _PPC64_IO_H */
--- /dev/null
+#ifndef _PPC64_IOCTL_H
+#define _PPC64_IOCTL_H
+
+
+/*
+ * This was copied from the alpha as it's a bit cleaner there.
+ * -- Cort
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* _IOC field widths: 8-bit command number, 8-bit type ("magic" letter),
+ * 13-bit size, 3-bit direction -- the alpha/ppc layout, not the x86 one. */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+/* Bit offsets of the fields inside an ioctl request word. */
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
+ * And this turns out useful to catch old ioctl numbers in header
+ * files for us.
+ */
+#define _IOC_NONE 1U
+#define _IOC_READ 2U
+#define _IOC_WRITE 4U
+
+/* Pack direction/type/number/size into a single ioctl request word. */
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode them.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* various drivers, such as the pcmcia stuff, need these... */
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _PPC64_IOCTL_H */
--- /dev/null
+#ifndef _ASM_PPC64_IOCTLS_H
+#define _ASM_PPC64_IOCTLS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ioctl.h>
+
+/* tty/file ioctl numbers.  The 'f'/'t'/'T' groups are encoded via the
+ * _IO*() macros; the bare 0x54xx values further down are fixed legacy
+ * numbers shared across Linux ports and must not be re-encoded. */
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TIOCGETP _IOR('t', 8, struct sgttyb)
+#define TIOCSETP _IOW('t', 9, struct sgttyb)
+#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
+
+#define TIOCSETC _IOW('t', 17, struct tchars)
+#define TIOCGETC _IOR('t', 18, struct tchars)
+#define TCGETS _IOR('t', 19, struct termios)
+#define TCSETS _IOW('t', 20, struct termios)
+#define TCSETSW _IOW('t', 21, struct termios)
+#define TCSETSF _IOW('t', 22, struct termios)
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCGLTC _IOR('t', 116, struct ltchars)
+#define TIOCSLTC _IOW('t', 117, struct ltchars)
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+/* Legacy hard-coded ioctl numbers begin here. */
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+
+#define TIOCSTI 0x5412
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+
+
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#endif /* _ASM_PPC64_IOCTLS_H */
--- /dev/null
+#ifndef __PPC64_IPC_H__
+#define __PPC64_IPC_H__
+
+/*
+ * These are used to wrap system calls on PowerPC.
+ *
+ * See arch/ppc/kernel/syscalls.c for ugly details..
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/* Argument pair passed through sys_ipc for MSGRCV; msgp is presumably a
+ * 32-bit user pointer to the message buffer -- confirm against the
+ * syscalls.c wrapper mentioned above. */
+struct ipc_kludge {
+ u32 msgp;
+ s32 msgtyp;
+};
+
+/* Operation codes for the sys_ipc() multiplexer. */
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+/* Encode the IPC interface version in the high half of the call number. */
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif /* __PPC64_IPC_H__ */
--- /dev/null
+#ifndef __PPC64_IPCBUF_H__
+#define __PPC64_IPCBUF_H__
+
+/*
+ * The ipc64_perm structure for the PPC is identical to kern_ipc_perm
+ * as we have always had 32-bit UIDs and GIDs in the kernel.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key; /* IPC key the object was created with */
+ __kernel_uid_t uid; /* owner uid */
+ __kernel_gid_t gid; /* owner gid */
+ __kernel_uid_t cuid; /* creator uid */
+ __kernel_gid_t cgid; /* creator gid */
+ __kernel_mode_t mode; /* access modes */
+ unsigned int seq; /* slot reuse sequence number */
+ unsigned int __pad1;
+ unsigned long __unused1; /* reserved for future use */
+ unsigned long __unused2;
+};
+
+#endif /* __PPC64_IPCBUF_H__ */
--- /dev/null
+#ifdef __KERNEL__
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/atomic.h>
+
+/* irq line enable/disable interface. */
+extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
+extern void enable_irq(unsigned int);
+
+/*
+ * this is the maximum number of virtual irqs we will use.
+ */
+#define NR_IRQS 512
+
+#define NUM_8259_INTERRUPTS 16
+
+/* Interrupt numbers are virtual in case they are sparsely
+ * distributed by the hardware.
+ */
+#define NR_HW_IRQS 8192
+/* Forward/backward maps between the sparse hardware irq space (NR_HW_IRQS)
+ * and the dense virtual irq space (NR_IRQS). */
+extern unsigned short real_irq_to_virt_map[NR_HW_IRQS];
+extern unsigned short virt_irq_to_real_map[NR_IRQS];
+/* Create a mapping for a real_irq if it doesn't already exist.
+ * Return the virtual irq as a convenience.
+ */
+unsigned long virt_irq_create_mapping(unsigned long real_irq);
+/* These funcs map irqs between real and virtual */
+static inline unsigned long real_irq_to_virt(unsigned long real_irq) {
+	/* Hardware -> virtual irq number, via the global translation map. */
+	unsigned long virt = real_irq_to_virt_map[real_irq];
+
+	return virt;
+}
+static inline unsigned long virt_irq_to_real(unsigned long virt_irq) {
+	/* Virtual -> hardware irq number, via the global translation map. */
+	unsigned long real = virt_irq_to_real_map[virt_irq];
+
+	return real;
+}
+
+/*
+ * This gets called from serial.c, which is now used on
+ * powermacs as well as prep/chrp boxes.
+ * Prep and chrp both have cascaded 8259 PICs.
+ */
+static __inline__ int irq_cannonicalize(int irq)
+{
+	/* ppc64 uses a flat virtual irq space: identity mapping, no
+	 * 8259-cascade renumbering needed here. */
+	return (irq);
+}
+
+#endif /* _ASM_IRQ_H */
+#endif /* __KERNEL__ */
--- /dev/null
+/*
+ * linux/include/asm-ppc/keyboard.h
+ *
+ * Created 3 Nov 1996 by Geert Uytterhoeven
+ * Modified for Power Macintosh by Paul Mackerras
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This file contains the ppc architecture specific keyboard definitions -
+ * like the intel pc for prep systems, different for power macs.
+ */
+
+#ifndef __ASMPPC64_KEYBOARD_H
+#define __ASMPPC64_KEYBOARD_H
+
+#ifdef __KERNEL__
+
+#include <linux/adb.h>
+#include <asm/machdep.h>
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/kd.h>
+#include <asm/io.h>
+
+/* PC-style keyboard controller interrupt line. */
+#define KEYBOARD_IRQ 1
+#define DISABLE_KBD_DURING_INTERRUPTS 0
+#define INIT_KBD
+
+static inline int kbd_setkeycode(unsigned int scancode, unsigned int keycode)
+{
+	/* Defer to the platform hook when one is registered; 0 otherwise. */
+	return ppc_md.kbd_setkeycode ?
+		ppc_md.kbd_setkeycode(scancode, keycode) : 0;
+}
+
+static inline int kbd_getkeycode(unsigned int scancode)
+{
+	/* Defer to the platform hook when one is registered; 0 otherwise. */
+	return ppc_md.kbd_getkeycode ?
+		ppc_md.kbd_getkeycode(scancode) : 0;
+}
+
+static inline int kbd_translate(unsigned char keycode, unsigned char *keycodep,
+				char raw_mode)
+{
+	/* Translate a scancode through the platform hook; 0 if no hook. */
+	return ppc_md.kbd_translate ?
+		ppc_md.kbd_translate(keycode, keycodep, raw_mode) : 0;
+}
+
+static inline int kbd_unexpected_up(unsigned char keycode)
+{
+	/* Defer to the platform hook when one is registered; 0 otherwise. */
+	return ppc_md.kbd_unexpected_up ?
+		ppc_md.kbd_unexpected_up(keycode) : 0;
+}
+
+static inline void kbd_leds(unsigned char leds)
+{
+	/* Update the keyboard LEDs via the platform hook, if any. */
+	if (ppc_md.kbd_leds != NULL)
+		ppc_md.kbd_leds(leds);
+}
+
+static inline void kbd_init_hw(void)
+{
+	/* Let the platform initialise the keyboard hardware, if it can. */
+	if (ppc_md.kbd_init_hw != NULL)
+		ppc_md.kbd_init_hw();
+}
+
+/* SysRq translation table is supplied per-platform via ppc_md. */
+#define kbd_sysrq_xlate (ppc_md.ppc_kbd_sysrq_xlate)
+
+extern unsigned long SYSRQ_KEY;
+#define E1_PAUSE 119 /* PAUSE key */
+
+/* resource allocation */
+#define kbd_request_region()
+#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \
+ "keyboard", NULL)
+
+/* How to access the keyboard macros on this platform. */
+#define kbd_read_input() inb(KBD_DATA_REG)
+static inline int kbd_read_status(void) {
+	/* Read the i8042 status port (0x64).  A floating bus reads back as
+	 * 0xff; map that to 0 so callers see "bad/absent controller"
+	 * instead of an all-ones status. */
+	int status = inb(0x64);
+
+	if (status == 0xff)
+		return 0;
+	return status;
+}
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+
+/* Some stoneage hardware needs delays after some operations. */
+#define kbd_pause() do { } while(0)
+
+/*
+ * Machine specific bits for the PS/2 driver
+ */
+
+/* PC-style auxiliary (PS/2 mouse) interrupt line. */
+#define AUX_IRQ 12
+
+#define aux_request_irq(hand, dev_id) \
+ request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id)
+
+#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASMPPC64_KEYBOARD_H */
--- /dev/null
+/*
+ * kgdb.h: Defines and declarations for serial line source level
+ * remote debugging of the Linux kernel using gdb.
+ *
+ * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _PPC64_KGDB_H
+#define _PPC64_KGDB_H
+
+#ifndef __ASSEMBLY__
+/* To initialize the serial, first thing called */
+extern void zs_kgdb_hook(int tty_num);
+/* To init the kgdb engine. (called by serial hook)*/
+extern void set_debug_traps(void);
+
+/* To enter the debugger explicitly. */
+extern void breakpoint(void);
+
+/* For taking exceptions
+ * these are defined in traps.c
+ */
+extern void (*debugger)(struct pt_regs *regs);
+extern int (*debugger_bpt)(struct pt_regs *regs);
+extern int (*debugger_sstep)(struct pt_regs *regs);
+extern int (*debugger_iabr_match)(struct pt_regs *regs);
+extern int (*debugger_dabr_match)(struct pt_regs *regs);
+extern void (*debugger_fault_handler)(struct pt_regs *regs);
+
+/* What we bring to the party */
+int kgdb_bpt(struct pt_regs *regs);
+int kgdb_sstep(struct pt_regs *regs);
+void kgdb(struct pt_regs *regs);
+int kgdb_iabr_match(struct pt_regs *regs);
+int kgdb_dabr_match(struct pt_regs *regs);
+/* NOTE(review): `static` prototypes in a shared header give every other
+ * includer a "declared static but never defined" warning -- confirm these
+ * belong only in the kgdb stub translation unit. */
+static void kgdb_fault_handler(struct pt_regs *regs);
+static void handle_exception (struct pt_regs *regs);
+
+/*
+ * external low-level support routines (ie macserial.c)
+ */
+extern void kgdb_interruptible(int); /* control interrupts from serial */
+extern void putDebugChar(char); /* write a single character */
+extern char getDebugChar(void); /* read and return a single char */
+
+#endif /* !(__ASSEMBLY__) */
+#endif /* !(_PPC64_KGDB_H) */
--- /dev/null
+/*
+ * include/asm-ppc64/linux_logo.h: A linux logo to be displayed on boot
+ * (pinched from the sparc port).
+ *
+ * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * You can put anything here, but:
+ * LINUX_LOGO_COLORS has to be less than 224
+ * values have to start from 0x20
+ * (i.e. linux_logo_{red,green,blue}[0] is color 0x20)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+
+#define linux_logo_banner "Linux/PPC-64 version " UTS_RELEASE
+
+#define LINUX_LOGO_HEIGHT 80
+#define LINUX_LOGO_WIDTH 80
+#define LINUX_LOGO_COLORS 214
+
+#ifdef INCLUDE_LINUX_LOGO_DATA
+
+/* Pull the actual pixel data in from the generic logo header. */
+#define INCLUDE_LINUX_LOGOBW
+#define INCLUDE_LINUX_LOGO16
+#include <linux/linux_logo.h>
+
+#else
+
+/* prototypes only */
+extern unsigned char linux_logo_red[];
+extern unsigned char linux_logo_green[];
+extern unsigned char linux_logo_blue[];
+extern unsigned char linux_logo[];
+extern unsigned char linux_logo_bw[];
+extern unsigned char linux_logo16_red[];
+extern unsigned char linux_logo16_green[];
+extern unsigned char linux_logo16_blue[];
+extern unsigned char linux_logo16[];
+
+#endif
--- /dev/null
+#ifndef _PPC64_LMB_H
+#define _PPC64_LMB_H
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/prom.h>
+
+extern unsigned long reloc_offset(void);
+
+#define MAX_LMB_REGIONS 32
+
+/* Raw OF "reg" property layout: 32-bit or 64-bit address/size cells. */
+union lmb_reg_property {
+ struct reg_property32 addr32[MAX_LMB_REGIONS];
+ struct reg_property64 addr64[MAX_LMB_REGIONS];
+};
+
+#define LMB_MEMORY_AREA 1
+#define LMB_IO_AREA 2
+
+/* One contiguous block of memory or I/O space. */
+struct lmb_property {
+ unsigned long base;
+ unsigned long physbase;
+ unsigned long size;
+ unsigned long type;
+};
+
+struct lmb_region {
+ unsigned long cnt;
+ unsigned long size;
+ unsigned long lcd_size; /* Least Common Denominator */
+ struct lmb_property region[MAX_LMB_REGIONS+1];
+};
+
+/* Global boot-time memory map: all memory plus reserved carve-outs. */
+struct lmb {
+ unsigned long debug;
+ struct lmb_region memory;
+ struct lmb_region reserved;
+};
+
+extern struct lmb lmb;
+
+extern void lmb_init(void);
+extern void lmb_analyze(void);
+extern long lmb_add(unsigned long, unsigned long);
+extern long lmb_add_io(unsigned long base, unsigned long size);
+extern long lmb_reserve(unsigned long, unsigned long);
+extern unsigned long lmb_alloc(unsigned long, unsigned long);
+extern unsigned long lmb_phys_mem_size(void);
+extern unsigned long lmb_end_of_DRAM(void);
+extern unsigned long lmb_abs_to_phys(unsigned long);
+extern void lmb_dump(char *);
+
+static inline unsigned long
+lmb_addrs_overlap(unsigned long base1, unsigned long size1,
+		  unsigned long base2, unsigned long size2)
+{
+	/* Two half-open ranges [base, base+size) intersect exactly when
+	 * each one starts before the other one ends. */
+	unsigned long end1 = base1 + size1;
+	unsigned long end2 = base2 + size2;
+
+	return (base1 < end2) && (base2 < end1);
+}
+
+static inline long
+lmb_regions_overlap(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
+{
+	/* Overlap test between two entries of the same region array. */
+	struct lmb_property *p1 = &rgn->region[r1];
+	struct lmb_property *p2 = &rgn->region[r2];
+
+	return lmb_addrs_overlap(p1->base, p1->size, p2->base, p2->size);
+}
+
+static inline long
+lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
+		   unsigned long base2, unsigned long size2)
+{
+	/* +1 when region2 starts exactly where region1 ends, -1 for the
+	 * mirrored case, 0 when the regions are not contiguous. */
+	if (base1 + size1 == base2)
+		return 1;
+	if (base2 + size2 == base1)
+		return -1;
+	return 0;
+}
+
+static inline long
+lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
+{
+	/* Entries are mergeable only when they hold the same type; the
+	 * result is boolean (either direction of adjacency counts). */
+	struct lmb_property *p1 = &rgn->region[r1];
+	struct lmb_property *p2 = &rgn->region[r2];
+
+	if (p1->type != p2->type)
+		return 0;
+	return lmb_addrs_adjacent(p1->base, p1->size,
+				  p2->base, p2->size) != 0;
+}
+
+#endif /* _PPC64_LMB_H */
--- /dev/null
+#ifdef __KERNEL__
+#ifndef _PPC_MACHDEP_H
+#define _PPC_MACHDEP_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/seq_file.h>
+
+/* Opaque types referenced by the hook table below. */
+struct pt_regs;
+struct pci_bus;
+struct pci_dev;
+struct kbd_repeat;
+struct device_node;
+struct TceTable;
+struct rtc_time;
+
+/* Per-platform function table; the active set is the global ppc_md. */
+struct machdep_calls {
+	/* High use functions in the first cachelines, low use functions
+	 * follow. DRENG collect profile data.
+	 */
+	/* Hashed page table (HPT) entry management primitives. */
+	void (*hpte_invalidate)(unsigned long slot);
+
+	void (*hpte_updatepp)(long slot,
+			      unsigned long newpp,
+			      unsigned long va);
+	void (*hpte_updateboltedpp)(unsigned long newpp,
+				    unsigned long ea);
+	unsigned long (*hpte_getword0)(unsigned long slot);
+
+	long (*hpte_find)( unsigned long vpn );
+
+	long (*hpte_selectslot)(unsigned long vpn);
+
+	void (*hpte_create_valid)(unsigned long slot,
+				  unsigned long vpn,
+				  unsigned long prpn,
+				  unsigned hash,
+				  void * ptep,
+				  unsigned hpteflags,
+				  unsigned bolted);
+	/* TCE table maintenance (DMA translation entries). */
+	void (*tce_build)(struct TceTable * tbl,
+			  long tcenum,
+			  unsigned long uaddr,
+			  int direction);
+	void (*tce_free)(struct TceTable *tbl,
+			 dma_addr_t dma_addr,
+			 unsigned order,
+			 unsigned numPages);
+
+	/* SMP bring-up and cross-cpu messaging hooks. */
+	void (*smp_message_pass)(int target,
+				 int msg,
+				 unsigned long data,
+				 int wait);
+	int (*smp_probe)(void);
+	void (*smp_kick_cpu)(int nr);
+	void (*smp_setup_cpu)(int nr);
+
+	void (*setup_arch)(void);
+	/* Optional, may be NULL. */
+	void (*setup_residual)(struct seq_file *m, int cpu_id);
+	/* Optional, may be NULL. */
+	void (*get_cpuinfo)(struct seq_file *m);
+	/* Optional, may be NULL. */
+	unsigned int (*irq_cannonicalize)(unsigned int irq);
+	void (*init_IRQ)(void);
+	void (*init_ras_IRQ)(void);
+	int (*get_irq)(struct pt_regs *);
+	void (*post_irq)( struct pt_regs *, int );
+
+	/* A general init function, called by ppc_init in init/main.c.
+	   May be NULL. */
+	void (*init)(void);
+
+	void (*restart)(char *cmd);
+	void (*power_off)(void);
+	void (*halt)(void);
+
+	long (*time_init)(void); /* Optional, may be NULL */
+	int (*set_rtc_time)(struct rtc_time *);
+	void (*get_rtc_time)(struct rtc_time *);
+	void (*get_boot_time)(struct rtc_time *);
+	void (*calibrate_decr)(void);
+
+	void (*progress)(char *, unsigned short);
+
+	unsigned char (*nvram_read_val)(int addr);
+	void (*nvram_write_val)(int addr, unsigned char val);
+
+/* Tons of keyboard stuff. */
+	int (*kbd_setkeycode)(unsigned int scancode,
+			      unsigned int keycode);
+	int (*kbd_getkeycode)(unsigned int scancode);
+	int (*kbd_translate)(unsigned char scancode,
+			     unsigned char *keycode,
+			     char raw_mode);
+	char (*kbd_unexpected_up)(unsigned char keycode);
+	void (*kbd_leds)(unsigned char leds);
+	void (*kbd_init_hw)(void);
+#ifdef CONFIG_MAGIC_SYSRQ
+	unsigned char *ppc_kbd_sysrq_xlate;
+#endif
+
+	/* Debug interface. Low level I/O to some terminal device */
+	void (*udbg_putc)(unsigned char c);
+	unsigned char (*udbg_getc)(void);
+	int (*udbg_getc_poll)(void);
+
+	/* PCI interfaces */
+	int (*pcibios_read_config_byte)(struct device_node *dn, int offset, u8 *val);
+	int (*pcibios_read_config_word)(struct device_node *dn, int offset, u16 *val);
+	int (*pcibios_read_config_dword)(struct device_node *dn, int offset, u32 *val);
+	int (*pcibios_write_config_byte)(struct device_node *dn, int offset, u8 val);
+	int (*pcibios_write_config_word)(struct device_node *dn, int offset, u16 val);
+	int (*pcibios_write_config_dword)(struct device_node *dn, int offset, u32 val);
+
+	/* Called after scanning the bus, before allocating
+	 * resources
+	 */
+	void (*pcibios_fixup)(void);
+
+	/* Called for each PCI bus in the system
+	 * when it's probed
+	 */
+	void (*pcibios_fixup_bus)(struct pci_bus *);
+
+	/* Called when pci_enable_device() is called (initial=0) or
+	 * when a device with no assigned resource is found (initial=1).
+	 * Returns 0 to allow assignement/enabling of the device
+	 */
+	int (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
+
+	void* (*pci_dev_io_base)(unsigned char bus, unsigned char devfn, int physical);
+	void* (*pci_dev_mem_base)(unsigned char bus, unsigned char devfn);
+	int (*pci_dev_root_bridge)(unsigned char bus, unsigned char devfn);
+
+	/* this is for modules, since _machine can be a define -- Cort */
+	int ppc_machine;
+};
+
+/* The live platform hook table and the boot command line buffer. */
+extern struct machdep_calls ppc_md;
+extern char cmd_line[512];
+
+extern void setup_pci_ptrs(void);
+
+/*
+ * Power macintoshes have either a CUDA or a PMU controlling
+ * system reset, power, NVRAM, RTC.
+ */
+typedef enum sys_ctrler_kind {
+	SYS_CTRLER_UNKNOWN = 0,
+	SYS_CTRLER_CUDA = 1,
+	SYS_CTRLER_PMU = 2,
+} sys_ctrler_t;
+
+extern sys_ctrler_t sys_ctrler;
+
+#endif /* __KERNEL__ */
--- /dev/null
+/*
+ * Machine dependent access functions for RTC registers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_PPC64_MC146818RTC_H
+#define __ASM_PPC64_MC146818RTC_H
+
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+/* ISA CMOS RTC: index register at 0x70, data register at 0x71. */
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
+#endif
+
+/*
+ * The yet supported machines all access the RTC index register via
+ * an ISA port access but the way to access the date register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+#endif /* __ASM_PPC64_MC146818RTC_H */
--- /dev/null
+/*
+ * md.h: High speed xor_block operation for RAID4/5
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_MD_H
+#define __ASM_MD_H
+
+/* No arch-optimized xor_block here; fall back to the generic one. */
+/* #define HAVE_ARCH_XORBLOCK */
+
+#define MD_XORBLOCK_ALIGNMENT sizeof(long)
+
+#endif /* __ASM_MD_H */
--- /dev/null
+#ifndef _ASM_PPC64_MEMORY_H_
+#define _ASM_PPC64_MEMORY_H_
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+/* Barrier fragments spliced into inline asm only on SMP builds. */
+#define EIEIO_ON_SMP "eieio\n"
+#define ISYNC_ON_SMP "\n\tisync"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#endif
+
+/* eieio: enforce in-order execution of I/O; also a compiler barrier
+ * via the "memory" clobber. */
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+/* isync: discard prefetched instructions; compiler barrier as above. */
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp() eieio()
+#define isync_on_smp() isync()
+#else
+/* UP: compiler barrier only, no hardware sync instruction needed. */
+#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
+#define isync_on_smp() __asm__ __volatile__("": : :"memory")
+#endif
+
+#endif
--- /dev/null
+#ifndef __PPC64_MMAN_H__
+#define __PPC64_MMAN_H__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* mmap()/mprotect()/msync()/madvise() flag values for this ABI. */
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_NONE 0x0 /* page can not be accessed */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x0f /* Mask for type of mapping */
+#define MAP_FIXED 0x10 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
+#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_INVALIDATE 2 /* invalidate the caches */
+#define MS_SYNC 4 /* synchronous memory sync */
+
+#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
+#define MCL_FUTURE 0x4000 /* lock all additions to address space */
+
+#define MADV_NORMAL 0x0 /* default page-in behavior */
+#define MADV_RANDOM 0x1 /* page-in minimum required */
+#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
+#define MADV_WILLNEED 0x3 /* pre-fault pages */
+#define MADV_DONTNEED 0x4 /* discard these pages */
+
+/* compatibility flags */
+#define MAP_ANON MAP_ANONYMOUS
+#define MAP_FILE 0
+
+#endif /* __PPC64_MMAN_H__ */
--- /dev/null
+/*
+ * PowerPC memory management structures
+ *
+ * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
+ * PPC64 rework.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC64_MMU_H_
+#define _PPC64_MMU_H_
+
+#ifndef __ASSEMBLY__
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+/*
+ * Define the size of the cache used for segment table entries. The first
+ * entry is used as a cache pointer, therefore the actual number of entries
+ * stored is one less than defined here. Do not change this value without
+ * considering the impact it will have on the layout of the paca in Paca.h.
+ */
+#define STAB_CACHE_SIZE 16
+
+/*
+ * Hardware Segment Lookaside Buffer Entry
+ * This structure has been padded out to two 64b doublewords (actual SLBE's are
+ * 94 bits). This padding facilites use by the segment management
+ * instructions.
+ */
+typedef struct {
+	unsigned long esid: 36; /* Effective segment ID */
+	unsigned long resv0:20; /* Reserved */
+	unsigned long v: 1; /* Entry valid (v=1) or invalid */
+	unsigned long resv1: 1; /* Reserved */
+	unsigned long ks: 1; /* Supervisor (privileged) state storage key */
+	unsigned long kp: 1; /* Problem state storage key */
+	unsigned long n: 1; /* No-execute if n=1 */
+	unsigned long resv2: 3; /* padding to a 64b boundary */
+} ste_dword0;
+
+typedef struct {
+	unsigned long vsid: 52; /* Virtual segment ID */
+	unsigned long resv0:12; /* Padding to a 64b boundary */
+} ste_dword1;
+
+/* A segment table entry: two doublewords, each viewable raw or as fields. */
+typedef struct _STE {
+	union {
+		unsigned long dword0;
+		ste_dword0 dw0;
+	} dw0;
+
+	union {
+		unsigned long dword1;
+		ste_dword1 dw1;
+	} dw1;
+} STE;
+
+typedef struct {
+	unsigned long esid: 36; /* Effective segment ID */
+	unsigned long v: 1; /* Entry valid (v=1) or invalid */
+	unsigned long null1:15; /* padding to a 64b boundary */
+	unsigned long index:12; /* Index to select SLB entry. Used by slbmte */
+} slb_dword0;
+
+typedef struct {
+	unsigned long vsid: 52; /* Virtual segment ID */
+	unsigned long ks: 1; /* Supervisor (privileged) state storage key */
+	unsigned long kp: 1; /* Problem state storage key */
+	unsigned long n: 1; /* No-execute if n=1 */
+	unsigned long l: 1; /* Virt pages are large (l=1) or 4KB (l=0) */
+	unsigned long c: 1; /* Class */
+	unsigned long resv0: 7; /* Padding to a 64b boundary */
+} slb_dword1;
+
+/* An SLB entry, same raw/bitfield dual view as STE. */
+typedef struct _SLBE {
+	union {
+		unsigned long dword0;
+		slb_dword0 dw0;
+	} dw0;
+
+	union {
+		unsigned long dword1;
+		slb_dword1 dw1;
+	} dw1;
+} SLBE;
+
+/*
+ * This structure is used in Paca.h where the layout depends on the
+ * size being 24B.
+ */
+typedef struct {
+	unsigned long real;
+	unsigned long virt;
+	unsigned long next_round_robin;
+} STAB;
+
+/* Hardware Page Table Entry */
+
+#define HPTES_PER_GROUP 8
+
+typedef struct {
+	unsigned long avpn:57; /* vsid | api == avpn */
+	unsigned long : 2; /* Software use */
+	unsigned long bolted: 1; /* HPTE is "bolted" */
+	unsigned long : 1; /* Software use */
+	unsigned long : 1; /* Reserved */
+	unsigned long h: 1; /* Hash function identifier */
+	unsigned long v: 1; /* Valid (v=1) or invalid (v=0) */
+} Hpte_dword0;
+
+typedef struct {
+	unsigned long : 6; /* unused - padding */
+	unsigned long ac: 1; /* Address compare */
+	unsigned long r: 1; /* Referenced */
+	unsigned long c: 1; /* Changed */
+	unsigned long w: 1; /* Write-thru cache mode */
+	unsigned long i: 1; /* Cache inhibited */
+	unsigned long m: 1; /* Memory coherence required */
+	unsigned long g: 1; /* Guarded */
+	unsigned long n: 1; /* No-execute */
+	unsigned long pp: 2; /* Page protection bits 1:2 */
+} Hpte_flags;
+
+typedef struct {
+	unsigned long pp0: 1; /* Page protection bit 0 */
+	unsigned long : 1; /* Reserved */
+	unsigned long rpn: 50; /* Real page number */
+	unsigned long : 2; /* Reserved */
+	unsigned long ac: 1; /* Address compare */
+	unsigned long r: 1; /* Referenced */
+	unsigned long c: 1; /* Changed */
+	unsigned long w: 1; /* Write-thru cache mode */
+	unsigned long i: 1; /* Cache inhibited */
+	unsigned long m: 1; /* Memory coherence required */
+	unsigned long g: 1; /* Guarded */
+	unsigned long n: 1; /* No-execute */
+	unsigned long pp: 2; /* Page protection bits 1:2 */
+} Hpte_dword1;
+
+typedef struct {
+	char padding[6]; /* padding */
+	unsigned long : 6; /* padding */
+	unsigned long flags: 10; /* HPTE flags */
+} Hpte_dword1_flags;
+
+/* Full HPTE: two doublewords, raw or as bitfields. */
+typedef struct _HPTE {
+	union {
+		unsigned long dword0;
+		Hpte_dword0 dw0;
+	} dw0;
+
+	union {
+		unsigned long dword1;
+		struct {
+			unsigned long pp0: 1; /* Page protection bit 0 */
+			unsigned long ts: 1; /* Tag set bit */
+			unsigned long rpn: 50; /* Real page number */
+			unsigned long : 2; /* Unused */
+			unsigned long ac: 1; /* Address compare bit */
+			unsigned long r: 1; /* Referenced */
+			unsigned long c: 1; /* Changed */
+			unsigned long w: 1; /* Write-thru cache mode */
+			unsigned long i: 1; /* Cache inhibited */
+			unsigned long m: 1; /* Memory coherence */
+			unsigned long g: 1; /* Guarded */
+			unsigned long n: 1; /* No-execute page if N=1 */
+			unsigned long pp: 2; /* Page protection bit 1:2 */
+		} dw1;
+	} dw1;
+} HPTE;
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+/* pp0 will always be 0 for linux */
+#define PP_RWXX 0 /* Supervisor read/write, User none */
+#define PP_RWRX 1 /* Supervisor read/write, User read */
+#define PP_RWRW 2 /* Supervisor read/write, User read/write */
+#define PP_RXRX 3 /* Supervisor read, User read */
+
+
+/* Bookkeeping for the kernel's hash table of HPTEs. */
+typedef struct {
+	HPTE * htab;
+	unsigned long htab_num_ptegs;
+	unsigned long htab_hash_mask;
+	unsigned long next_round_robin;
+	unsigned long last_kernel_address;
+} HTAB;
+
+void invalidate_hpte( unsigned long slot );
+long select_hpte_slot( unsigned long vpn );
+void create_valid_hpte( unsigned long slot, unsigned long vpn,
+ unsigned long prpn, unsigned hash,
+ void * ptep, unsigned hpteflags,
+ unsigned bolted );
+
+#define PD_SHIFT (10+12) /* Page directory */
+#define PD_MASK 0x02FF
+#define PT_SHIFT (12) /* Page Table */
+#define PT_MASK 0x02FF
+
+/* Compute the primary hash for a hashed-page-table lookup from a
+ * virtual page number. The split of the VPN into vsid and page index
+ * depends on the page size (large = 16M pages, else 4K pages).
+ */
+static inline unsigned long hpt_hash(unsigned long vpn, int large)
+{
+ unsigned long vsid;
+ unsigned long page;
+
+ if (large) {
+ vsid = vpn >> 4; /* low 4 bits of the VPN index within the segment */
+ page = vpn & 0xf;
+ } else {
+ vsid = vpn >> 16; /* low 16 bits of the VPN index within the segment */
+ page = vpn & 0xffff;
+ }
+
+ /* xor the low 39 bits of the vsid with the page index */
+ return (vsid & 0x7fffffffff) ^ page;
+}
+
+#define PG_SHIFT (12) /* Page Entry */
+
+/* Invalidate the TLB entry for effective address va.
+ * clrldi drops the top 16 bits of va before issuing tlbie; the
+ * surrounding ptesync/eieio/tlbsync/ptesync sequence orders the
+ * invalidation against other storage accesses (per the PowerPC
+ * architecture's required tlbie sequence).
+ */
+extern __inline__ void _tlbie( unsigned long va )
+{
+ __asm__ __volatile__ ( " \n\
+ clrldi %0,%0,16 \n\
+ ptesync \n\
+ tlbie %0 \n\
+ eieio \n\
+ tlbsync \n\
+ ptesync"
+ : : "r" (va) : "memory" );
+}
+
+#endif /* __ASSEMBLY__ */
+
+/* Block size masks */
+#define BL_128K 0x000
+#define BL_256K 0x001
+#define BL_512K 0x003
+#define BL_1M 0x007
+#define BL_2M 0x00F
+#define BL_4M 0x01F
+#define BL_8M 0x03F
+#define BL_16M 0x07F
+#define BL_32M 0x0FF
+#define BL_64M 0x1FF
+#define BL_128M 0x3FF
+#define BL_256M 0x7FF
+
+/* Used to set up SDR1 register */
+#define HASH_TABLE_SIZE_64K 0x00010000
+#define HASH_TABLE_SIZE_128K 0x00020000
+#define HASH_TABLE_SIZE_256K 0x00040000
+#define HASH_TABLE_SIZE_512K 0x00080000
+#define HASH_TABLE_SIZE_1M 0x00100000
+#define HASH_TABLE_SIZE_2M 0x00200000
+#define HASH_TABLE_SIZE_4M 0x00400000
+#define HASH_TABLE_MASK_64K 0x000
+#define HASH_TABLE_MASK_128K 0x001
+#define HASH_TABLE_MASK_256K 0x003
+#define HASH_TABLE_MASK_512K 0x007
+#define HASH_TABLE_MASK_1M 0x00F
+#define HASH_TABLE_MASK_2M 0x01F
+#define HASH_TABLE_MASK_4M 0x03F
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define MI_AP 786
+#define MI_Ks 0x80000000 /* Should not be set */
+#define MI_Kp 0x40000000 /* Should always be set */
+
+/* The effective page number register. When read, contains the information
+ * about the last instruction TLB miss. When MI_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define MI_EPN 787
+#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MI_EVALID 0x00000200 /* Entry is valid */
+#define MI_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the instruction TLB, it contains bits that get loaded into the
+ * TLB entry when the MI_RPN is written.
+ */
+#define MI_TWC 789
+#define MI_APG 0x000001e0 /* Access protection group (0) */
+#define MI_GUARDED 0x00000010 /* Guarded storage */
+#define MI_PSMASK 0x0000000c /* Mask of page size bits */
+#define MI_PS8MEG 0x0000000c /* 8M page size */
+#define MI_PS512K 0x00000004 /* 512K page size */
+#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MI_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the instruction TLB, using
+ * additional information from the MI_EPN, and MI_TWC registers.
+ */
+#define MI_RPN 790
+
+/* Define an RPN value for mapping kernel memory to large virtual
+ * pages for boot initialization. This has real page number of 0,
+ * large page size, shared page, cache enabled, and valid.
+ * Also mark all subpages valid and write access.
+ */
+#define MI_BOOTINIT 0x000001fd
+
+#define MD_CTR 792 /* Data TLB control register */
+#define MD_GPM 0x80000000 /* Set domain manager mode */
+#define MD_PPM 0x40000000 /* Set subpage protection */
+#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
+#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
+#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
+#define MD_RESETVAL 0x04000000 /* Value of register at reset */
+
+#define M_CASID 793 /* Address space ID (context) to match */
+#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
+
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define MD_AP 794
+#define MD_Ks 0x80000000 /* Should not be set */
+#define MD_Kp 0x40000000 /* Should always be set */
+
+/* The effective page number register. When read, contains the information
+ * about the last data TLB miss. When MD_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define MD_EPN 795
+#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MD_EVALID 0x00000200 /* Entry is valid */
+#define MD_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* The pointer to the base address of the first level page table.
+ * During a software tablewalk, reading this register provides the address
+ * of the entry associated with MD_EPN.
+ */
+#define M_TWB 796
+#define M_L1TB 0xfffff000 /* Level 1 table base address */
+#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the data TLB, it contains bits that get loaded into the TLB entry
+ * when the MD_RPN is written. It is also provides the hardware assist
+ * for finding the PTE address during software tablewalk.
+ */
+#define MD_TWC 797
+#define MD_L2TB 0xfffff000 /* Level 2 table base address */
+#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
+#define MD_APG 0x000001e0 /* Access protection group (0) */
+#define MD_GUARDED 0x00000010 /* Guarded storage */
+#define MD_PSMASK 0x0000000c /* Mask of page size bits */
+#define MD_PS8MEG 0x0000000c /* 8M page size */
+#define MD_PS512K 0x00000004 /* 512K page size */
+#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MD_WT 0x00000002 /* Use writethrough page attribute */
+#define MD_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the data TLB, using
+ * additional information from the MD_EPN, and MD_TWC registers.
+ */
+#define MD_RPN 798
+
+/* This is a temporary storage register that could be used to save
+ * a processor working register during a tablewalk.
+ */
+#define M_TW 799
+
+#endif /* _PPC64_MMU_H_ */
--- /dev/null
+#ifndef __PPC64_MMU_CONTEXT_H
+#define __PPC64_MMU_CONTEXT_H
+
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mmu.h>
+#include <asm/ppcdebug.h>
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+ if (unlikely(b[0]))
+ return __ffs(b[0]); /* bit in word 0: bits 0-63 */
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64; /* word 1: bits 64-127 */
+ return __ffs(b[2]) + 128; /* word 2: guaranteed non-zero (see above) */
+}
+
+#define NO_CONTEXT 0
+#define FIRST_USER_CONTEXT 0x10 /* First 16 reserved for kernel */
+#define LAST_USER_CONTEXT 0x8000 /* Same as PID_MAX for now... */
+#define NUM_USER_CONTEXT (LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
+
+/* Choose whether we want to implement our context
+ * number allocator as a LIFO or FIFO queue.
+ */
+#if 1
+#define MMU_CONTEXT_LIFO
+#else
+#define MMU_CONTEXT_FIFO
+#endif
+
+/* Circular queue of free context numbers, shared by all cpus. */
+struct mmu_context_queue_t {
+ spinlock_t lock; /* protects all fields below */
+ long head; /* index of next free context to hand out */
+ long tail; /* index of last queued slot (used in FIFO mode) */
+ long size; /* count of free context numbers currently queued */
+ mm_context_t elements[LAST_USER_CONTEXT];
+};
+
+extern struct mmu_context_queue_t mmu_context_queue;
+
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
+extern void flush_stab(void);
+
+/*
+ * The context number queue has underflowed.
+ * Meaning: we tried to push a context number that was freed
+ * back onto the context queue and the queue was already full.
+ */
+static inline void
+mmu_context_underflow(void)
+{
+ printk(KERN_DEBUG "mmu_context_underflow\n");
+ panic("mmu_context_underflow");
+}
+
+
+/*
+ * Set up the context for a new address space.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ long head, size;
+
+ spin_lock( &mmu_context_queue.lock );
+
+ /* No free context numbers left: fail the fork/exec. */
+ if ( (size = mmu_context_queue.size) <= 0 ) {
+ spin_unlock( &mmu_context_queue.lock );
+ return -ENOMEM;
+ }
+
+ head = mmu_context_queue.head;
+ mm->context = mmu_context_queue.elements[head];
+
+ /* Advance head circularly over the LAST_USER_CONTEXT-entry array. */
+ head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
+ mmu_context_queue.head = head;
+ mmu_context_queue.size = size-1;
+
+ spin_unlock( &mmu_context_queue.lock );
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void
+destroy_context(struct mm_struct *mm)
+{
+ /* Note: size is only read under the queue lock below; reading it
+  * in the initializer (as before) was an unlocked access whose
+  * value was discarded anyway.
+  */
+ long index, size;
+
+ spin_lock( &mmu_context_queue.lock );
+
+ /* A full queue means more frees than allocations - a bug. */
+ if ( (size = mmu_context_queue.size) >= NUM_USER_CONTEXT ) {
+ spin_unlock( &mmu_context_queue.lock );
+ mmu_context_underflow();
+ }
+
+#ifdef MMU_CONTEXT_LIFO
+ /* LIFO: back the head up one slot so this number is reused first. */
+ index = mmu_context_queue.head;
+ index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
+ mmu_context_queue.head = index;
+#else
+ /* FIFO: append after the current tail. */
+ index = mmu_context_queue.tail;
+ index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
+ mmu_context_queue.tail = index;
+#endif
+
+ mmu_context_queue.size = size+1;
+ mmu_context_queue.elements[index] = mm->context;
+
+ spin_unlock( &mmu_context_queue.lock );
+}
+
+
+/*
+ * switch_mm is the entry point called from the architecture independent
+ * code in kernel/sched.c
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk, int cpu)
+{
+ tsk->thread.pgdir = next->pgd; /* cache the pgdir in the thread
+ maybe not needed any more */
+ /* flush_stab() presumably discards the old mm's segment table
+ * entries so the new mm's translations get faulted in - see its
+ * definition (extern, declared above). */
+ flush_stab();
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void
+activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
+{
+ /* Same work as switch_mm: cache the pgdir and flush segments. */
+ current->thread.pgdir = mm->pgd;
+ flush_stab();
+}
+
+
+/* UL suffixes make these 64-bit unsigned constants explicitly, so the
+ * vsid multiply/mask below is unsigned long arithmetic irrespective of
+ * the compiler's rules for typing bare decimal constants.
+ */
+#define VSID_RANDOMIZER 42470972311UL
+#define VSID_MASK 0xfffffffffUL
+
+
+/* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
+ */
+static inline unsigned long
+get_kernel_vsid( unsigned long ea )
+{
+ unsigned long ordinal, vsid;
+
+ /* Fold the ESID (ea bits above the 256M segment) and the region id
+ * (top 4 bits of ea) into one ordinal, then scramble it into the
+ * VSID space with the randomizer multiply.
+ */
+ ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | (ea >> 60);
+ vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
+
+ ifppcdebug(PPCDBG_HTABSTRESS) {
+ /* For debug, this path creates a very poor vsid distribution.
+ * A user program can access virtual addresses in the form
+ * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
+ * to hash to the same page table group.
+ */
+ ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
+ vsid = ordinal & VSID_MASK;
+ }
+
+ return vsid;
+}
+
+/* This is only valid for user EA's (user EA's do not exceed 2^41 (EADDR_SIZE))
+ */
+static inline unsigned long
+get_vsid( unsigned long context, unsigned long ea )
+{
+ unsigned long ordinal, vsid;
+
+ /* Like get_kernel_vsid, but the low bits come from the mm context
+ * number instead of the region id.
+ */
+ ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
+ vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
+
+ ifppcdebug(PPCDBG_HTABSTRESS) {
+ /* See the HTABSTRESS comment in get_kernel_vsid above. */
+ ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
+ vsid = ordinal & VSID_MASK;
+ }
+
+ return vsid;
+}
+
+#endif /* __PPC64_MMU_CONTEXT_H */
--- /dev/null
+#ifndef _ASM_PPC64_MODULE_H
+#define _ASM_PPC64_MODULE_H
+/*
+ * This file contains the PPC architecture specific module code.
+ *
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define module_map(x) vmalloc(x)
+#define module_unmap(x) vfree(x)
+#define arch_init_modules(x) do { } while (0)
+#define module_arch_init(x) (0)
+#endif /* _ASM_PPC64_MODULE_H */
--- /dev/null
+#ifndef _PPC64_MSGBUF_H
+#define _PPC64_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for the PPC architecture.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _PPC64_MSGBUF_H */
--- /dev/null
+/*
+ * linux/include/asm-ppc64/namei.h
+ * Adapted from linux/include/asm-alpha/namei.h
+ *
+ * Included from linux/fs/namei.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __PPC64_NAMEI_H
+#define __PPC64_NAMEI_H
+
+/* This dummy routine maybe changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* __PPC64_NAMEI_H */
--- /dev/null
+/*
+ * PreP compliant NVRAM access
+ * This needs to be updated for PPC64
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC64_NVRAM_H
+#define _PPC64_NVRAM_H
+
+#define NVRAM_AS0 0x74
+#define NVRAM_AS1 0x75
+#define NVRAM_DATA 0x77
+
+
+/* RTC Offsets */
+
+#define MOTO_RTC_SECONDS 0x1FF9
+#define MOTO_RTC_MINUTES 0x1FFA
+#define MOTO_RTC_HOURS 0x1FFB
+#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
+#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
+#define MOTO_RTC_MONTH 0x1FFE
+#define MOTO_RTC_YEAR 0x1FFF
+#define MOTO_RTC_CONTROLA 0x1FF8
+#define MOTO_RTC_CONTROLB 0x1FF9
+
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+/* PowerMac specific nvram stuffs */
+
+enum {
+ pmac_nvram_OF, /* Open Firmware partition */
+ pmac_nvram_XPRAM, /* MacOS XPRAM partition */
+ pmac_nvram_NR /* MacOS Name Registry partition */
+};
+
+/* Return partition offset in nvram */
+extern int pmac_get_partition(int partition);
+
+/* Direct access to XPRAM */
+extern u8 pmac_xpram_read(int xpaddr);
+extern void pmac_xpram_write(int xpaddr, u8 data);
+
+/* Some offsets in XPRAM */
+#define PMAC_XPRAM_MACHINE_LOC 0xe4
+#define PMAC_XPRAM_SOUND_VOLUME 0x08
+
+/* Machine location structure in XPRAM */
+struct pmac_machine_location {
+ u32 latitude; /* 2+30 bit Fractional number */
+ u32 longitude; /* 2+30 bit Fractional number */
+ u32 delta; /* mix of GMT delta and DLS */
+};
+
+/* /dev/nvram ioctls */
+#define PMAC_NVRAM_GET_OFFSET _IOWR('p', 0x40, int) /* Get NVRAM partition offset */
+
+#endif /* _PPC64_NVRAM_H */
--- /dev/null
+#ifndef _PPC64_PAGE_H
+#define _PPC64_PAGE_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_OFFSET_MASK (PAGE_SIZE-1)
+
+#define SID_SHIFT 28
+#define SID_MASK 0xfffffffff
+#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
+
+/* Define an illegal instr to trap on the bug.
+ * We don't use 0 because that marks the end of a function
+ * in the ELF ABI. That's "Boo Boo" in case you wonder...
+ */
+#define BUG_OPCODE .long 0x00b00b00 /* For asm */
+#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#include <asm/Naca.h>
+
+#define STRICT_MM_TYPECHECKS
+
+#define REGION_SIZE 4UL
+#define OFFSET_SIZE 60UL
+#define REGION_SHIFT 60UL
+#define OFFSET_SHIFT 0UL
+#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
+#define REGION_STRIDE (1UL << REGION_SHIFT)
+
+typedef union ppc64_va {
+ struct {
+ unsigned long off : OFFSET_SIZE; /* intra-region offset */
+ unsigned long reg : REGION_SIZE; /* region number */
+ } f;
+ unsigned long l;
+ void *p;
+} ppc64_va;
+
+/* Zero one page by issuing dcbz (zero data cache block) over every
+ * cache line in the page. Line size and lines-per-page come from the
+ * naca, so the loop adapts to the processor's cache geometry.
+ */
+static __inline__ void clear_page(void *addr)
+{
+ unsigned long lines, line_size;
+
+ line_size = naca->dCacheL1LineSize;
+ lines = naca->dCacheL1LinesPerPage;
+
+ __asm__ __volatile__(
+" mtctr %1\n\
+1: dcbz 0,%0\n\
+ add %0,%0,%3\n\
+ bdnz+ 1b"
+ : "=r" (addr)
+ : "r" (lines), "0" (addr), "r" (line_size)
+ : "ctr", "memory");
+}
+
+extern void copy_page(void *to, void *from);
+struct page;
+extern void clear_user_page(void *page, unsigned long vaddr);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr);
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking.
+ * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned int pmd; } pmd_t;
+typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned int pmd_t;
+typedef unsigned int pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+#ifdef CONFIG_XMON
+#include <asm/ptrace.h>
+extern void xmon(struct pt_regs *excp);
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ xmon(0); \
+} while (0)
+#else
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
+} while (0)
+#endif
+
+#define PAGE_BUG(page) do { BUG(); } while (0)
+
+/* Pure 2^n version of get_order */
+/* Return the allocation order for size bytes: smallest n such that
+ * size <= PAGE_SIZE << n.
+ */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ /* Shifting by PAGE_SHIFT-1 (half a page) makes exactly PAGE_SIZE
+ * come out as order 0 after the loop's mandatory first shift.
+ */
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+
+#endif /* __ASSEMBLY__ */
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) double word boundary */
+#define DOUBLEWORD_ALIGN(addr) _ALIGN(addr,sizeof(unsigned long))
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
+
+#ifdef MODULE
+#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
+#else
+#define __page_aligned \
+ __attribute__((__aligned__(PAGE_SIZE), \
+ __section__(".data.page_aligned")))
+#endif
+
+
+/* This must match the -Ttext linker address */
+/* Note: tophys & tovirt make assumptions about how */
+/* KERNELBASE is defined for performance reasons. */
+/* When KERNELBASE moves, those macros may have */
+/* to change! */
+#define PAGE_OFFSET 0xC000000000000000
+#define KERNELBASE PAGE_OFFSET
+#define VMALLOCBASE 0xD000000000000000
+#define IOREGIONBASE 0xE000000000000000
+
+#define IO_REGION_ID (IOREGIONBASE>>REGION_SHIFT)
+#define VMALLOC_REGION_ID (VMALLOCBASE>>REGION_SHIFT)
+#define KERNEL_REGION_ID (KERNELBASE>>REGION_SHIFT)
+#define USER_REGION_ID (0UL)
+#define REGION_ID(X) (((unsigned long)(X))>>REGION_SHIFT)
+
+/*
+ * Define valid/invalid EA bits (for all ranges)
+ */
+#define VALID_EA_BITS (0x000001ffffffffffUL)
+#define INVALID_EA_BITS (~(REGION_MASK|VALID_EA_BITS))
+
+#define IS_VALID_REGION_ID(x) \
+ (((x) == USER_REGION_ID) || ((x) >= KERNEL_REGION_ID))
+#define IS_VALID_EA(x) \
+ ((!((x) & INVALID_EA_BITS)) && IS_VALID_REGION_ID(REGION_ID(x)))
+
+#define __bpn_to_ba(x) ((((unsigned long)(x))<<PAGE_SHIFT) + KERNELBASE)
+#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
+
+#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+
+/* Given that physical addresses do not map 1-1 to absolute addresses, we
+ * use these macros to better specify exactly what we want to do.
+ * The only restriction on their use is that the absolute address
+ * macros cannot be used until after the LMB structure has been
+ * initialized in prom.c. -Peter
+ */
+#define __v2p(x) ((void *) __pa(x))
+#define __v2a(x) ((void *) phys_to_absolute(__pa(x)))
+#define __p2a(x) ((void *) phys_to_absolute(x))
+#define __p2v(x) ((void *) __va(x))
+#define __a2p(x) ((void *) absolute_to_phys(x))
+#define __a2v(x) ((void *) __va(absolute_to_phys(x)))
+
+#define virt_to_page(kaddr) (mem_map+(__pa((unsigned long)kaddr) >> PAGE_SHIFT))
+
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+
+#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_PAGE_H */
--- /dev/null
+#ifndef _ASM_PPC64_PARAM_H
+#define _ASM_PPC64_PARAM_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef HZ
+#define HZ 1024
+#ifdef __KERNEL__
+#if HZ == 100
+/* ppc (like X86) is defined to provide userspace with a world where HZ=100
+ We have to do this, (x*const)/const2 isnt optimised out because its not
+ a null operation as it might overflow.. */
+#define hz_to_std(a) (a)
+#else
+#define hz_to_std(a) ((a)*(100/HZ)+((a)*(100%HZ))/HZ)
+#endif
+#endif
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#ifdef __KERNEL__
+# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+#endif
+
+#endif /* _ASM_PPC64_PARAM_H */
--- /dev/null
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_PPC64_PARPORT_H
+#define _ASM_PPC64_PARPORT_H
+
+/* Probe for non-PCI parallel ports; on this platform that means
+ * ISA-style probing only (included only by parport_pc.c, per the
+ * header comment above).
+ */
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports (autoirq, autodma);
+}
+
+#endif /* !(_ASM_PPC64_PARPORT_H) */
--- /dev/null
+#ifdef __KERNEL__
+#ifndef _ASM_PCI_BRIDGE_H
+#define _ASM_PCI_BRIDGE_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pci_io_base returns the memory address at which you can access
+ * the I/O space for PCI bus number `bus' (or NULL on error).
+ */
+extern void *pci_bus_io_base(unsigned int bus);
+extern unsigned long pci_bus_io_base_phys(unsigned int bus);
+extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
+
+/* Get the PCI host controller for a bus */
+extern struct pci_controller* pci_bus_to_hose(int bus);
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node* node);
+
+enum phb_types {
+ phb_type_unknown = 0x0,
+ phb_type_hypervisor = 0x1,
+ phb_type_python = 0x10,
+ phb_type_speedwagon = 0x11
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+ char what[8]; /* Eye catcher */
+ enum phb_types type; /* Type of hardware */
+ struct pci_controller *next; /* link in the list of all controllers */
+ struct pci_bus *bus; /* root pci_bus behind this host bridge */
+ void *arch_data; /* platform-private pointer */
+
+ int first_busno; /* range of bus numbers behind this PHB */
+ int last_busno;
+
+ void *io_base_virt; /* virtual base of this PHB's I/O space */
+ unsigned long io_base_phys; /* physical base of the same */
+
+ /* Some machines (PReP) have a non 1:1 mapping of
+ * the PCI memory space in the CPU bus space
+ */
+ unsigned long pci_mem_offset;
+ unsigned long pci_io_offset;
+
+ struct pci_ops *ops; /* config-space access methods */
+ volatile unsigned long *cfg_addr; /* config address/data registers */
+ volatile unsigned char *cfg_data;
+ volatile unsigned long *phb_regs; /* bridge register windows */
+ volatile unsigned long *chip_regs;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+ int global_number; /* numbering of this PHB - NOTE(review):
+ * global vs local vs system-bus semantics
+ * not visible here; confirm against users */
+ int local_number;
+ int system_bus_number;
+ unsigned long buid;
+ unsigned long dma_window_base_cur; /* DMA/TCE window for this bridge */
+ unsigned long dma_window_size;
+};
+
+
+/* This version handles the new Uni-N host bridge, the iobase is now
+ * a per-device thing. I also added the memory base so PReP can
+ * be fixed to return 0xc0000000 (I didn't actually implement it)
+ *
+ * pci_dev_io_base() returns either a virtual (ioremap'ed) address or
+ * a physical address. In-kernel clients will use logical while the
+ * sys_pciconfig_iobase syscall returns a physical one to userland.
+ */
+void *pci_dev_io_base(unsigned char bus, unsigned char devfn, int physical);
+void *pci_dev_mem_base(unsigned char bus, unsigned char devfn);
+
+/* Returns the root-bridge number (Uni-N number) of a device */
+int pci_dev_root_bridge(unsigned char bus, unsigned char devfn);
+
+/*
+ * pci_device_loc returns the bus number and device/function number
+ * for a device on a PCI bus, given its device_node struct.
+ * It returns 0 if OK, -1 on error.
+ */
+int pci_device_loc(struct device_node *dev, unsigned char *bus_ptr,
+ unsigned char *devfn_ptr);
+
+struct bridge_data {
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
+ void *io_base; /* virtual */
+ unsigned long io_base_phys;
+ int bus_number;
+ int max_bus;
+ struct bridge_data *next;
+ struct device_node *node;
+};
+
+#endif
+#endif /* __KERNEL__ */
--- /dev/null
+#ifndef __PPC64_PCI_H
+#define __PPC64_PCI_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER 0
+#define IOBASE_MEMORY 1
+#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader.
+ */
+extern int pcibios_assign_all_busses(void);
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+extern inline void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+extern inline void pcibios_penalize_isa_irq(int irq)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+struct pci_dev;
+#define REG_SAVE_SIZE 64
+/************************************************************************
+ * Structure to hold the data for PCI Register Save/Restore functions. *
+ ************************************************************************/
+struct pci_config_reg_save_area {
+ struct pci_dev* PciDev; /* Pointer to device(Sanity Check) */
+ int Flags; /* Control & Info Flags */
+ int RCode; /* Return Code on Save/Restore */
+ int Register; /* Pointer to current register. */
+ u8 Regs[REG_SAVE_SIZE]; /* Save Area */
+};
+/************************************************************************
+ * Functions to support device reset *
+ ************************************************************************/
+extern int pci_reset_device(struct pci_dev*, int, int);
+extern int pci_save_config_regs(struct pci_dev*,struct pci_config_reg_save_area*);
+extern int pci_restore_config_regs(struct pci_dev*,struct pci_config_reg_save_area*);
+extern char* pci_card_location(struct pci_dev*);
+
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+ size_t size, int direction);
+extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction);
+extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+
+extern void pSeries_pcibios_init_early(void);
+
+/* Make a streaming DMA buffer consistent for the CPU. Beyond the
+ * direction sanity check the body is empty - no flush is performed
+ * on this platform.
+ */
+extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ BUG();
+ /* nothing to do */
+}
+
+extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ BUG();
+ /* nothing to do */
+}
+
+/* Return whether the given PCI device DMA address mask can
+ * be supported properly. For example, if your device can
+ * only drive the low 24-bits during PCI bus mastering, then
+ * you would pass 0x00ffffff as the mask to this function.
+ */
+extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+{
+ /* Unconditionally claims support for any DMA mask. */
+ return 1;
+}
+
+/* Return the index of the PCI controller for device PDEV. */
+extern int pci_controller_num(struct pci_dev *pdev);
+
+struct vm_area_struct;
+/* Map a range of PCI memory or I/O space for a device into user space */
+int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+#define HAVE_PCI_MMAP 1
+
+/* Accessors for the DMA address/length stored in a scatterlist entry
+ * by pci_map_sg(). */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->dma_length)
+
+/* Highmem does not exist here, so a page mapping is just a single
+ * mapping of the page's kernel virtual address plus offset. */
+#define pci_map_page(dev, page, off, size, dir) \
+ pci_map_single(dev, (page_address(page) + (off)), size, dir)
+#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)
+
+/* pci_unmap_{single,page} is not a nop, thus drivers must really keep
+ * the DMA address and length around for the later unmap: */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+
+/* 64-bit dual-address-cycle DMA is not supported. */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* Zero: PCI bus addresses do NOT equal physical memory addresses here
+ * (mappings are real work — see the non-nop pci_unmap note above).
+ * The networking and block device layers use this boolean for bounce
+ * buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __PPC64_PCI_H */
--- /dev/null
+/*
+ * pci_dma.h
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PCI_DMA_H
+#define _PCI_DMA_H
+
+#include <asm/types.h>
+#include <linux/spinlock.h>
+
+/*
+ * NUM_TCE_LEVELS defines the largest contiguous block
+ * of dma (tce) space we can get. NUM_TCE_LEVELS = 10
+ * allows up to 2**9 pages (512 * 4096) = 2 MB
+ */
+#define NUM_TCE_LEVELS 10
+
+#define NO_TCE ((dma_addr_t)-1)
+
+/*
+ * Tces come in two formats, one for the virtual bus and a different
+ * format for PCI
+ */
+#define TCE_VB 0
+#define TCE_PCI 1
+
+/* A single 64-bit TCE; the bitfields overlay wholeTce.  Some fields
+ * are meaningful only for the virtual-bus (vb) format, others only
+ * for the PCI format — see the per-field notes. */
+union Tce {
+ u64 wholeTce;
+ struct {
+ u64 cacheBits :6; /* Cache hash bits - not used */
+ u64 rsvd :6;
+ u64 rpn :40; /* Absolute page number */
+ u64 valid :1; /* Tce is valid (vb only) */
+ u64 allIo :1; /* Tce is valid for all lps (vb only) */
+ u64 lpIndex :8; /* LpIndex for user of TCE (vb only) */
+ u64 pciWrite :1; /* Write allowed (pci only) */
+ u64 readWrite :1; /* Read allowed (pci), Write allowed (vb) */
+ } tceBits;
+};
+
+/* A simple bitmap: numBits valid bits stored in numBytes at map. */
+struct Bitmap {
+ unsigned long numBits;
+ unsigned long numBytes;
+ unsigned char * map;
+};
+
+/* One bitmap per allocation level (level[k] tracks free blocks of
+ * 2**k pages, up to NUM_TCE_LEVELS levels). */
+struct MultiLevelBitmap {
+ unsigned long maxLevel;
+ struct Bitmap level[NUM_TCE_LEVELS];
+};
+
+/* Per-bus TCE table state: identity of the table plus the multi-level
+ * bitmap used to allocate contiguous ranges of TCE entries.
+ * Protected by `lock'. */
+struct TceTable {
+ u64 busNumber;
+ u64 size;
+ u64 startOffset;
+ u64 base; /* pSeries native only */
+ u64 index;
+ u64 tceType; /* TCE_VB or TCE_PCI */
+ spinlock_t lock;
+ struct MultiLevelBitmap mlbm;
+};
+
+/* Control block describing one section of a TCE table (our slice of
+ * a possibly larger table).  Sizes are in pages. */
+struct TceTableManagerCB {
+ u64 busNumber; /* Bus number for this tce table */
+ u64 start; /* Will be NULL for secondary */
+ u64 totalSize; /* Size (in pages) of whole table */
+ u64 startOffset; /* Index into real tce table of the
+ start of our section */
+ u64 size; /* Size (in pages) of our section */
+ u64 index; /* Index of this tce table (token?) */
+ u16 maxTceTableIndex; /* Max num of tables for partition */
+ u8 virtualBusFlag; /* Flag to indicate virtual bus */
+ u8 rsvd[5]; /* pad the structure to an 8-byte multiple */
+};
+
+extern struct TceTable virtBusTceTable; /* Tce table for virtual bus */
+
+extern void create_tce_tables(void);
+
+void tce_init_pSeries(void);
+void tce_init_iSeries(void);
+
+#endif
--- /dev/null
+#ifndef _PPC64_PGALLOC_H
+#define _PPC64_PGALLOC_H
+
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/Naca.h>
+#include <asm/Paca.h>
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/* Page-table quicklists (free-page caches) live in the per-CPU Paca,
+ * reached via get_paca().  Each *_quicklist is the head of a singly
+ * linked list threaded through the first word of each free page;
+ * pgtable_cache_size counts the pages currently cached. */
+#define quicklists get_paca()
+
+#define pgd_quicklist (quicklists->pgd_cache)
+#define pmd_quicklist (quicklists->pmd_cache)
+#define pte_quicklist (quicklists->pte_cache)
+#define pgtable_cache_size (quicklists->pgtable_cache_sz)
+
+/*
+ * Pop a pgd page off the per-CPU quicklist, or return NULL when the
+ * list is empty.  The first word of each cached page links to the
+ * next free page; it is zeroed before the page is handed out.
+ * (The former "else ret = NULL;" branch was redundant — ret is
+ * already NULL there — and has been dropped, matching
+ * pmd_alloc_one_fast()/pte_alloc_one_fast().)
+ */
+static inline pgd_t*
+pgd_alloc_one_fast (struct mm_struct *mm)
+{
+ unsigned long *ret = pgd_quicklist;
+
+ if (ret != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0; /* clear the freelist link word */
+ --pgtable_cache_size;
+ }
+ return (pgd_t *) ret;
+}
+
+/*
+ * Allocate a pgd page: try the per-CPU quicklist first, then fall
+ * back to the page allocator and zero the fresh page.  (The VM
+ * system never calls pgd_alloc_one_fast() itself, so we do it here.)
+ * Returns NULL only when the system is out of memory.
+ */
+static inline pgd_t*
+pgd_alloc (struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = pgd_alloc_one_fast(mm);
+ if (pgd != NULL)
+ return pgd;
+
+ pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (pgd != NULL)
+ clear_page(pgd);
+ return pgd;
+}
+
+/*
+ * Return a pgd page to the per-CPU quicklist: the page's first word
+ * becomes the link to the previous list head.
+ */
+static inline void
+pgd_free (pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ ++pgtable_cache_size;
+}
+
+/* Install a pmd page into a pgd entry. */
+#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
+
+/*
+ * Pop a pmd page off the per-CPU quicklist, or return NULL when the
+ * list is empty.  Mirrors pgd_alloc_one_fast().
+ */
+static inline pmd_t*
+pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long *ret = (unsigned long *)pmd_quicklist;
+
+ if (ret != NULL) {
+ pmd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0; /* clear the freelist link word */
+ --pgtable_cache_size;
+ }
+ return (pmd_t *)ret;
+}
+
+/*
+ * Allocate and zero a fresh pmd page straight from the page
+ * allocator; returns NULL when memory is exhausted.
+ */
+static inline pmd_t*
+pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+{
+ pmd_t *pmd;
+
+ pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
+ if (pmd == NULL)
+ return NULL;
+ clear_page(pmd);
+ return pmd;
+}
+
+/*
+ * Return a pmd page to the per-CPU quicklist (first word of the page
+ * links to the previous head).
+ */
+static inline void
+pmd_free (pmd_t *pmd)
+{
+ *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
+ pmd_quicklist = (unsigned long *) pmd;
+ ++pgtable_cache_size;
+}
+
+/* Install a pte page into a pmd entry. */
+#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE)
+
+/*
+ * Pop a pte page off the per-CPU quicklist, or return NULL when the
+ * list is empty.  Mirrors pgd_alloc_one_fast()/pmd_alloc_one_fast().
+ */
+static inline pte_t*
+pte_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long *ret = (unsigned long *)pte_quicklist;
+
+ if (ret != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0; /* clear the freelist link word */
+ --pgtable_cache_size;
+ }
+ return (pte_t *)ret;
+}
+
+
+/*
+ * Allocate and zero a fresh pte page straight from the page
+ * allocator; returns NULL when memory is exhausted.
+ */
+static inline pte_t*
+pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pte == NULL)
+ return NULL;
+ clear_page(pte);
+ return pte;
+}
+
+/*
+ * Return a pte page to the per-CPU quicklist (first word of the page
+ * links to the previous head).
+ */
+static inline void
+pte_free (pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ ++pgtable_cache_size;
+}
+
+extern int do_check_pgt_cache(int, int);
+
+#endif /* _PPC64_PGALLOC_H */
--- /dev/null
+#ifndef _PPC64_PGTABLE_H
+#define _PPC64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 hashed page table.
+ */
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h> /* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#endif /* __ASSEMBLY__ */
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level. The PTE level must use a 64b record
+ * for each page table entry. The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE 9
+#define PMD_INDEX_SIZE 10
+#define PGD_INDEX_SIZE 10
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+#if 0
+/* DRENG / PPPBBB This is a compiler bug!!! */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#else
+#define USER_PTRS_PER_PGD (1024)
+#endif
+#define FIRST_USER_PGD_NR 0
+
+#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+ PGD_INDEX_SIZE + PAGE_SHIFT)
+
+/*
+ * Define the address range of the vmalloc VM area.
+ */
+#define VMALLOC_START (0xD000000000000000)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END (VMALLOC_START + VALID_EA_BITS)
+
+/*
+ * Define the address range of the imalloc VM area.
+ * (used for ioremap)
+ */
+#define IMALLOC_START (ioremap_bot)
+#define IMALLOC_VMADDR(x) ((unsigned long)(x))
+#define IMALLOC_BASE (0xE000000000000000)
+#define IMALLOC_END (IMALLOC_BASE + VALID_EA_BITS)
+
+/*
+ * Define the address range mapped virt <-> physical
+ */
+#define KRANGE_START KERNELBASE
+#define KRANGE_END (KRANGE_START + VALID_EA_BITS)
+
+/*
+ * Define the user address range
+ */
+#define USER_START (0UL)
+#define USER_END (USER_START + VALID_EA_BITS)
+
+
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+#define _PAGE_PRESENT 0x001UL /* software: pte contains a translation */
+#define _PAGE_USER 0x002UL /* matches one of the PP bits */
+#define _PAGE_RW 0x004UL /* software: user write access allowed */
+#define _PAGE_GUARDED 0x008UL
+#define _PAGE_COHERENT 0x010UL /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020UL /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040UL /* W: cache write-through */
+#define _PAGE_DIRTY 0x080UL /* C: page changed */
+#define _PAGE_ACCESSED 0x100UL /* R: page referenced */
+#define _PAGE_HPTENOIX 0x200UL /* software: pte HPTE slot unknown */
+#define _PAGE_HASHPTE 0x400UL /* software: pte has an associated HPTE */
+#define _PAGE_EXEC 0x800UL /* software: i-cache coherence required */
+#define _PAGE_SECONDARY 0x8000UL /* software: HPTE is in secondary group */
+#define _PAGE_GROUP_IX 0x7000UL /* software: HPTE index within group */
+/* Bits 0x7000 identify the index within an HPT Group */
+#define _PAGE_HPTEFLAGS (_PAGE_HASHPTE | _PAGE_HPTENOIX | _PAGE_SECONDARY | _PAGE_GROUP_IX)
+/* PAGE_MASK gives the right answer below, but only by accident */
+/* It should be preserving the high 48 bits and then specifically */
+/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)
+
+#define _PAGE_BASE _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT
+
+#define _PAGE_WRENABLE _PAGE_RW | _PAGE_DIRTY
+
+/* __pgprot defined in asm-ppc64/page.h */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
+#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
+
+/*
+ * The PowerPC can only do execute protection on a segment (256MB) basis,
+ * not on a page basis. So we consider execute permission the same as read.
+ * Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY_X
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY_X
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY_X
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
+
+#ifndef __ASSEMBLY__
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#endif /* __ASSEMBLY__ */
+
+/* shift to put page number into pte */
+#define PTE_SHIFT (16)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * mk_pte_phys takes a physical address as input
+ *
+ * mk_pte takes a (struct page *) as input
+ */
+
+/* Build a pte from a physical page and protection bits; the page
+ * number lands in the bits above PTE_SHIFT. */
+#define mk_pte_phys(physpage,pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = (((physpage)<<(PTE_SHIFT-PAGE_SHIFT)) | pgprot_val(pgprot)); \
+ pte; \
+})
+
+/* Same, but from a (struct page *): the mem_map index is the page
+ * number. */
+#define mk_pte(page,pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = ((unsigned long)((page) - mem_map) << PTE_SHIFT) | \
+ pgprot_val(pgprot); \
+ pte; \
+})
+
+/* Change the protection of a pte while keeping the bits in
+ * _PAGE_CHG_MASK (page number, referenced/changed, HPTE bookkeeping). */
+#define pte_modify(_pte, newprot) \
+ (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
+
+/* A pte is "none" if nothing but HPTE bookkeeping bits are set. */
+#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+
+/* pte_clear moved to later in this file */
+
+#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
+#define pte_page(x) (mem_map+pte_pagenr(x))
+
+/* pmd/pgd entries hold a "box page number" (__ba_to_bpn) rather than
+ * a pointer; __bpn_to_ba converts back to a base address.
+ * NOTE(review): pmd_bad()/pgd_bad() are true exactly when the entry
+ * is zero — the same condition as *_none() — confirm that is the
+ * intended notion of "bad". */
+#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) ((pmd_val(pmd)) == 0)
+#define pmd_present(pmd) ((pmd_val(pmd)) != 0)
+#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
+#define pmd_page(pmd) (__bpn_to_ba(pmd_val(pmd)))
+#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) ((pgd_val(pgd)) == 0)
+#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define pgd_page(pgd) (__bpn_to_ba(pgd_val(pgd)))
+
+/*
+ * Find an entry in a page-table-directory. We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD -1))
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir,addr) \
+ ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+/* Find an entry in the third-level page table.. */
+#define pte_offset(dir,addr) \
+ ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in the ioremap page-table-directory */
+#define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))
+
+/*
+ * Given a pointer to an mem_map[] entry, return the kernel virtual
+ * address corresponding to that page.
+ */
+#define page_address(page) ((page)->virtual)
+
+/* Convert a page count to megabytes. */
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+/* Query helpers: test a single flag in a pte value. */
+extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
+extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
+extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
+extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
+extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
+
+/* NOTE(review): these two take the pte by value and return void, so
+ * they modify only the local copy — the caller's pte is unchanged.
+ * Confirm call sites expect that (or should these return pte_t like
+ * the helpers below?). */
+extern inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+extern inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+/* Transform helpers: return a copy of the pte with one flag
+ * cleared... */
+extern inline pte_t pte_rdprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_RW); return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
+extern inline pte_t pte_mkold(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+
+/* ...or set. */
+extern inline pte_t pte_mkread(pte_t pte) {
+ pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) {
+ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) {
+ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+
+/* Atomic PTE updates */
+
+/*
+ * Atomically perform  old = *p; *p = (old & ~clr) | set;  and return
+ * the old value.  The ldarx/stdcx. (load-and-reserve / store-
+ * conditional) loop retries until the store succeeds without an
+ * intervening write to the pte, making the read-modify-write safe
+ * against concurrent updaters.
+ */
+static inline unsigned long pte_update( pte_t *p, unsigned long clr,
+ unsigned long set )
+{
+ unsigned long old, tmp;
+
+ __asm__ __volatile__("\n\
+1: ldarx %0,0,%3 \n\
+ andc %1,%0,%4 \n\
+ or %1,%1,%5 \n\
+ stdcx. %1,0,%3 \n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ return old;
+}
+
+/* Atomically clear the referenced bit; return whether it was set. */
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
+}
+
+/* Atomically clear the changed bit; return whether it was set. */
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
+}
+
+/* Atomically clear everything except the HPTE bookkeeping bits and
+ * return the previous pte. */
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
+{
+ return __pte(pte_update(ptep, ~_PAGE_HPTEFLAGS, 0));
+}
+
+/* Atomically remove write permission. */
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_update(ptep, _PAGE_RW, 0);
+}
+
+/* Atomically mark the pte changed (dirty). */
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ pte_update(ptep, 0, _PAGE_DIRTY);
+}
+
+/* Two ptes are "the same" if they differ only in HPTE bookkeeping
+ * bits. */
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ * On machines which use an MMU hash table we avoid changing the
+ * _PAGE_HASHPTE bit.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+ /* Clear and set everything except the HPTE bookkeeping bits,
+  * which keep their in-memory value. */
+ pte_update(ptep, ~_PAGE_HPTEFLAGS, pte_val(pte) & ~_PAGE_HPTEFLAGS);
+}
+
+/* Clear a pte, again preserving the HPTE bookkeeping bits. */
+static inline void pte_clear(pte_t * ptep)
+{
+ pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
+}
+
+struct mm_struct;
+struct vm_area_struct;
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void local_flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_page local_flush_tlb_page
+/* The vma argument must be parenthesized: without the parens an
+ * expression argument such as "vp + 1" would expand to
+ * "vp + 1->vm_mm" and parse incorrectly. */
+#define flush_tlb_range(vma, start, end) local_flush_tlb_range((vma)->vm_mm, start, end)
+
+/*
+ * Called after unmapping page-table pages; deliberately a no-op.
+ */
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* PPC has hw page tables. */
+}
+
+/*
+ * No cache flushing is required when address mappings are
+ * changed, because the caches on PowerPCs are physically
+ * addressed.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(vma, a, b) do { } while (0)
+#define flush_cache_page(vma, p) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr, int len);
+extern void flush_icache_range(unsigned long, unsigned long);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_page(struct page *page);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+extern unsigned long va_to_phys(unsigned long address);
+extern pte_t *va_to_pte(unsigned long address);
+extern unsigned long ioremap_bot, ioremap_base;
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pgd_t swapper_pg_dir[1024];
+extern pgd_t ioremap_dir[1024];
+
+extern void paging_init(void);
+
+/*
+ * Page tables may have changed. We don't need to do anything here
+ * as entries are faulted into the hash table by the low-level
+ * data/instruction access exception handlers.
+ */
+/*
+ * We won't be able to use update_mmu_cache to update the
+ * hardware page table because we need to update the pte
+ * as well, but we don't get the address of the pte, only
+ * its value.
+ */
+#define update_mmu_cache(vma, addr, pte) do { } while (0)
+
+extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
+extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte);
+
+/* Encode and de-code a swap entry.  Layout within the entry value:
+ * bit 0 is left clear (it aliases _PAGE_PRESENT), the type occupies
+ * bits 1..6, and the offset starts at bit 8.  The whole entry value
+ * is stored in the pte above PTE_SHIFT. */
+#define SWP_TYPE(entry) (((entry).val >> 1) & 0x3f)
+#define SWP_OFFSET(entry) ((entry).val >> 8)
+#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_SHIFT })
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address. Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test. What should we do here?
+ * The only use is in fs/ncpfs/dir.c
+ */
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_page_range remap_page_range
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+extern void updateBoltedHptePP(unsigned long newpp, unsigned long ea);
+extern void hpte_init_pSeries(void);
+extern void hpte_init_iSeries(void);
+
+extern void make_pte(HPTE * htab, unsigned long va, unsigned long pa,
+ int mode, unsigned long hash_mask, int large);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PPC64_PGTABLE_H */
--- /dev/null
+/*
+ * pmc.h
+ * Copyright (C) 2001 Dave Engebretsen & Mike Corrigan IBM Corporation.
+ *
+ * The PPC64 PMC subsystem encompases both the hardware PMC registers and
+ * a set of software event counters. An interface is provided via the
+ * proc filesystem which can be used to access this subsystem.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Start Change Log
+ * 2001/06/05 : engebret : Created.
+ * End Change Log
+ */
+
+#ifndef _PPC64_TYPES_H
+#include <asm/types.h>
+#endif
+
+#ifndef _PMC_H
+#define _PMC_H
+
+#define STAB_ENTRY_MAX 64
+
+/* Snapshot of the hardware performance-monitor registers: the three
+ * monitor-control SPRs plus the eight counters (SPR numbers are
+ * defined at the bottom of this file). */
+struct _pmc_hw
+{
+ u64 mmcr0;
+ u64 mmcr1;
+ u64 mmcra;
+
+ u64 pmc1;
+ u64 pmc2;
+ u64 pmc3;
+ u64 pmc4;
+ u64 pmc5;
+ u64 pmc6;
+ u64 pmc7;
+ u64 pmc8;
+};
+
+/* Software event counters, bumped via the PMC_SW_* macros below;
+ * kept per-CPU (pmc_sw_cpu[]) and system-wide (pmc_sw_system). */
+struct _pmc_sw
+{
+ u64 stab_faults; /* Count of faults on the stab */
+ u64 stab_capacity_castouts;/* Count of castouts from the stab */
+ u64 stab_invalidations; /* Count of invalidations from the */
+ /* stab, not including castouts */
+ u64 stab_entry_use[STAB_ENTRY_MAX];
+
+ u64 htab_primary_overflows;
+ u64 htab_capacity_castouts;
+ u64 htab_read_to_write_fault;
+};
+
+#define PMC_HW_TEXT_ENTRY_COUNT (sizeof(struct _pmc_hw) / sizeof(u64))
+#define PMC_SW_TEXT_ENTRY_COUNT (sizeof(struct _pmc_sw) / sizeof(u64))
+#define PMC_TEXT_ENTRY_SIZE 64
+
+struct _pmc_sw_text {
+ char buffer[PMC_SW_TEXT_ENTRY_COUNT * PMC_TEXT_ENTRY_SIZE];
+};
+
+struct _pmc_hw_text {
+ char buffer[PMC_HW_TEXT_ENTRY_COUNT * PMC_TEXT_ENTRY_SIZE];
+};
+
+extern struct _pmc_sw pmc_sw_system;
+extern struct _pmc_sw pmc_sw_cpu[];
+
+extern struct _pmc_sw_text pmc_sw_text;
+extern struct _pmc_hw_text pmc_hw_text;
+extern char *ppc64_pmc_stab(int file);
+extern char *ppc64_pmc_htab(int file);
+extern char *ppc64_pmc_hw(int file);
+
+/* Increment software PMC counters (field F, optionally array index E)
+ * for the current CPU or the whole system.  Flip the #if to 0 to
+ * compile the counters out. */
+#if 1
+#define PMC_SW_PROCESSOR(F) pmc_sw_cpu[smp_processor_id()].F++
+#define PMC_SW_PROCESSOR_A(F, E) (pmc_sw_cpu[smp_processor_id()].F[(E)])++
+#define PMC_SW_SYSTEM(F) pmc_sw_system.F++
+#else
+#define PMC_SW_PROCESSOR(F) do {;} while (0)
+/* Must take the same two arguments as the enabled variant, otherwise
+ * every PMC_SW_PROCESSOR_A(F, E) call site breaks when disabled. */
+#define PMC_SW_PROCESSOR_A(F, E) do {;} while (0)
+#define PMC_SW_SYSTEM(F) do {;} while (0)
+#endif
+
+#define MMCR0 795
+#define MMCR1 798
+#define MMCRA 786
+#define PMC1 787
+#define PMC2 788
+#define PMC3 789
+#define PMC4 790
+#define PMC5 791
+#define PMC6 792
+#define PMC7 793
+#define PMC8 794
+
+#define PMC_CONTROL_CPI 1
+#define PMC_CONTROL_TLB 2
+
+#endif /* _PMC_H */
--- /dev/null
+#ifndef __PPC64_POLL_H
+#define __PPC64_POLL_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Event/revent bits for poll(2). */
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM 0x0100
+#define POLLWRBAND 0x0200
+#define POLLMSG 0x0400
+
+/* One entry in the array passed to poll(2). */
+struct pollfd {
+ int fd; /* file descriptor to watch */
+ short events; /* events the caller is interested in */
+ short revents; /* events that actually occurred */
+};
+
+#endif /* __PPC64_POLL_H */
--- /dev/null
+#ifndef _PPC64_POSIX_TYPES_H
+#define _PPC64_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#if 0
+# define DRENG_TYPES
+#endif
+
+#ifdef DRENG_TYPES
+typedef unsigned long __kernel_dev_t;
+typedef unsigned long __kernel_ino_t;
+typedef unsigned long __kernel_nlink_t;
+#else
+typedef unsigned int __kernel_dev_t;
+typedef unsigned int __kernel_ino_t;
+typedef unsigned int __kernel_nlink_t;
+#endif
+typedef unsigned int __kernel_mode_t;
+typedef long __kernel_off_t;
+typedef long long __kernel_loff_t;
+typedef int __kernel_pid_t;
+typedef int __kernel_ipc_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned int __kernel_old_uid_t;
+typedef unsigned int __kernel_old_gid_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#ifndef __GNUC__
+
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
+#define __FD_ZERO(set) \
+ ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
+
+#else /* __GNUC__ */
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
+ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned long *tmp = (unsigned long *)p->fds_bits;
+ int i;
+
+ /* When the set size is a compile-time constant, unroll the common
+  * sizes; the cases deliberately fall through so e.g. a 16-word set
+  * also clears words 0..7. */
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ /* fall through */
+
+ case 8:
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ /* fall through */
+
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
+ }
+ }
+ /* Generic word-at-a-time clear for any other size. */
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+#endif /* __GNUC__ */
+#endif /* _PPC64_POSIX_TYPES_H */
--- /dev/null
+#ifndef _PPC64_PPC32_H
+#define _PPC64_PPC32_H
+
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+/*
+ * Data types and macros for providing 32b PowerPC support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __KERNEL_STRICT_NAMES
+#include <linux/types.h>
+typedef __kernel_fsid_t __kernel_fsid_t32;
+#endif
+
+/* Use this to get at 32-bit user passed pointers. */
+/* Things to consider: the low-level assembly stub does
+ srl x, 0, x for first four arguments, so if you have
+ pointer to something in the first four arguments, just
+ declare it as a pointer, not u32. On the other side,
+ arguments from 5th onwards should be declared as u32
+ for pointers, and need AA() around each usage.
+ A() macro should be used for places where you e.g.
+ have some internal variable u32 and just want to get
+ rid of a compiler warning. AA() has to be used in
+ places where you want to convert a function argument
+ to 32bit pointer or when you e.g. access pt_regs
+ structure and want to consider 32bit registers only.
+ -
+ */
+/* A(): plain cast of a 32-bit value to unsigned long, used to silence
+ * compiler warnings on internal u32 variables. */
+#define A(__x) ((unsigned long)(__x))
+/* AA(): force zero-extension of the low 32 bits — "clrldi %0,%0,32"
+ * clears the upper 32 bits of the register — used on 32-bit user
+ * pointers arriving in 64-bit registers (syscall args 5 and up). */
+#define AA(__x) \
+({ unsigned long __ret; \
+ __asm__ ("clrldi %0, %0, 32" \
+ : "=r" (__ret) \
+ : "0" (__x)); \
+ __ret; \
+})
+
+/* These are here to support 32-bit syscalls on a 64-bit kernel. */
+typedef unsigned int __kernel_size_t32;
+typedef int __kernel_ssize_t32;
+typedef int __kernel_ptrdiff_t32;
+typedef int __kernel_time_t32;
+typedef int __kernel_clock_t32;
+typedef int __kernel_pid_t32;
+typedef unsigned short __kernel_ipc_pid_t32;
+typedef unsigned int __kernel_uid_t32;
+typedef unsigned int __kernel_gid_t32;
+typedef unsigned int __kernel_dev_t32;
+typedef unsigned int __kernel_ino_t32;
+typedef unsigned int __kernel_mode_t32;
+typedef unsigned int __kernel_umode_t32;
+typedef short __kernel_nlink_t32;
+typedef int __kernel_daddr_t32;
+typedef int __kernel_off_t32;
+typedef unsigned int __kernel_caddr_t32;
+typedef int __kernel_loff_t32;
+/* typedef __kernel_fsid_t __kernel_fsid_t32; */
+
+struct statfs32 {
+ int f_type;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ __kernel_fsid_t32 f_fsid;
+ int f_namelen; /* SunOS ignores this field. */
+ int f_spare[6];
+};
+
+/* 32-bit union sigval; sival_ptr holds a 32-bit user pointer. */
+typedef union sigval32 {
+ int sival_int;
+ unsigned int sival_ptr;
+} sigval_t32;
+
+/* 32-bit layout of siginfo_t for compat (32-on-64) syscalls. */
+typedef struct siginfo32 {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ /* NOTE(review): SI_PAD_SIZE comes from the 64-bit asm/siginfo.h;
+ confirm it yields the padded size the 32-bit user ABI expects. */
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ __kernel_pid_t32 _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ unsigned int _timer1;
+ unsigned int _timer2;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ __kernel_pid_t32 _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ sigval_t32 _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ __kernel_pid_t32 _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ __kernel_clock_t32 _utime;
+ __kernel_clock_t32 _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
+ struct {
+ u32 _addr; /* faulting insn/memory ref. */
+ int _trapno;
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t32;
+
+/* The two #defines rename the identifiers, so the declarations below
+   actually define old_sigset_t32 and struct old_sigaction32. */
+#define __old_sigset_t32 old_sigset_t32
+#define __old_sigaction32 old_sigaction32
+
+typedef unsigned int __old_sigset_t32;
+struct __old_sigaction32 {
+ unsigned sa_handler; /* 32-bit user pointer */
+ __old_sigset_t32 sa_mask;
+ unsigned int sa_flags;
+ unsigned sa_restorer; /* not used by Linux/SPARC yet */
+};
+
+
+
+/* Signal-set geometry of the 32-bit ABI: 64 signals in 32-bit words. */
+#define _PPC32_NSIG 64
+#define _PPC32_NSIG_BPW 32
+#define _PPC32_NSIG_WORDS (_PPC32_NSIG / _PPC32_NSIG_BPW)
+
+/* 32-bit signal mask (two 32-bit words). */
+typedef struct {
+ unsigned int sig[_PPC32_NSIG_WORDS];
+} sigset32_t;
+
+/* 32-bit layout of struct sigaction. */
+struct sigaction32 {
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+ sigset32_t sa_mask; /* A 32 bit mask */
+};
+
+/* 32-bit layout of stack_t (sigaltstack). */
+typedef struct sigaltstack_32 {
+ unsigned int ss_sp; /* 32-bit user pointer */
+ int ss_flags;
+ __kernel_size_t32 ss_size;
+} stack_32_t;
+
+/* 32-bit layout of struct flock (fcntl file locking). */
+struct flock32 {
+ short l_type;
+ short l_whence;
+ __kernel_off_t32 l_start;
+ __kernel_off_t32 l_len;
+ __kernel_pid_t32 l_pid;
+ short __unused;
+};
+
+/* 32-bit layout of struct stat; trailing numbers are field sizes in bytes. */
+struct stat32 {
+ __kernel_dev_t32 st_dev; /* 2 */
+ /* __kernel_dev_t32 __pad1; */ /* 2 */
+ __kernel_ino_t32 st_ino; /* 4 */
+ __kernel_mode_t32 st_mode; /* 2 */
+ short st_nlink; /* 2 */
+ __kernel_uid_t32 st_uid; /* 2 */
+ __kernel_gid_t32 st_gid; /* 2 */
+ __kernel_dev_t32 st_rdev; /* 2 */
+ /* __kernel_dev_t32 __pad2; */ /* 2 */
+ __kernel_off_t32 st_size; /* 4 */
+ __kernel_off_t32 st_blksize; /* 4 */
+ __kernel_off_t32 st_blocks; /* 4 */
+ __kernel_time_t32 st_atime; /* 4 */
+ unsigned int __unused1; /* 4 */
+ __kernel_time_t32 st_mtime; /* 4 */
+ unsigned int __unused2; /* 4 */
+ __kernel_time_t32 st_ctime; /* 4 */
+ unsigned int __unused3; /* 4 */
+ unsigned int __unused4[2]; /* 2*4 */
+};
+
+/* 32-bit layout of the legacy (pre-stat32) kernel stat structure. */
+struct __old_kernel_stat32
+{
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned int st_size;
+ unsigned int st_atime;
+ unsigned int st_mtime;
+ unsigned int st_ctime;
+};
+
+/* 32-bit layout of struct sigcontext. */
+struct sigcontext32_struct {
+ unsigned int _unused[4];
+ int signal;
+ unsigned int handler; /* 32-bit user pointer */
+ unsigned int oldmask;
+ u32 regs; /* 4 byte pointer to the pt_regs32 structure. */
+};
+
+/* 32-bit layout of struct ucontext. */
+struct ucontext32 {
+ unsigned int uc_flags;
+ unsigned int uc_link; /* 32-bit user pointer */
+ stack_32_t uc_stack;
+ struct sigcontext32_struct uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+ /* NOTE(review): uc_sigmask is the kernel's 64-bit sigset_t rather
+ than the sigset32_t defined above -- confirm this matches the
+ 32-bit user-space ucontext layout. */
+};
+
+
+#endif /* _PPC64_PPC32_H */
--- /dev/null
+#ifndef __PPCDEBUG_H
+#define __PPCDEBUG_H
+/********************************************************************
+ * Author: Adam Litke, IBM Corp
+ * (c) 2001
+ *
+ * This file contains definitions and macros for a runtime debugging
+ * system for ppc64 (this should also work on 32 bit with a few
+ * adjustments).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/config.h>
+#include <asm/udbg.h>
+#include <stdarg.h>
+
+/* Turn a bit position into a 64-bit mask for the debug_switch word. */
+#define PPCDBG_BITVAL(X) ((1UL)<<((unsigned long)(X)))
+
+/* Defined below are the bit positions of various debug flags in the
+ * debug_switch variable (defined in Naca.h).
+ * -- When adding new values, please enter them into trace names below --
+ *
+ * Values 62 & 63 can be used to stress the hardware page table management
+ * code. They must be set statically, any attempt to change them dynamically
+ * would be a very bad idea.
+ */
+#define PPCDBG_MMINIT PPCDBG_BITVAL(0)
+#define PPCDBG_MM PPCDBG_BITVAL(1)
+#define PPCDBG_SYS32 PPCDBG_BITVAL(2)
+#define PPCDBG_SYS32NI PPCDBG_BITVAL(3)
+#define PPCDBG_SYS32X PPCDBG_BITVAL(4)
+#define PPCDBG_SYS32M PPCDBG_BITVAL(5)
+#define PPCDBG_SYS64 PPCDBG_BITVAL(6)
+#define PPCDBG_SYS64NI PPCDBG_BITVAL(7)
+#define PPCDBG_SYS64X PPCDBG_BITVAL(8)
+#define PPCDBG_SIGNAL PPCDBG_BITVAL(9)
+#define PPCDBG_SIGNALXMON PPCDBG_BITVAL(10)
+#define PPCDBG_BINFMT32 PPCDBG_BITVAL(11)
+#define PPCDBG_BINFMT64 PPCDBG_BITVAL(12)
+#define PPCDBG_BINFMTXMON PPCDBG_BITVAL(13)
+#define PPCDBG_BINFMT_32ADDR PPCDBG_BITVAL(14)
+#define PPCDBG_ALIGNFIXUP PPCDBG_BITVAL(15)
+#define PPCDBG_TCEINIT PPCDBG_BITVAL(16)
+#define PPCDBG_TCE PPCDBG_BITVAL(17)
+#define PPCDBG_PHBINIT PPCDBG_BITVAL(18)
+#define PPCDBG_SMP PPCDBG_BITVAL(19)
+#define PPCDBG_BOOT PPCDBG_BITVAL(20)
+#define PPCDBG_BUSWALK PPCDBG_BITVAL(21)
+#define PPCDBG_HTABSTRESS PPCDBG_BITVAL(62)
+#define PPCDBG_HTABSIZE PPCDBG_BITVAL(63)
+#define PPCDBG_NONE (0UL)
+#define PPCDBG_ALL (0xffffffffUL)
+
+/* The default initial value for the debug switch */
+#define PPC_DEBUG_DEFAULT 0
+/* #define PPC_DEBUG_DEFAULT PPCDBG_ALL */
+
+#define PPCDBG_NUM_FLAGS 64
+
+#ifdef WANT_PPCDBG_TAB
+/* A table of debug switch names to allow name lookup in xmon
+ * (and whoever else wants it).
+ * Only the first 22 flags are named here; the remaining entries of
+ * the 64-slot array are implicitly initialized to NULL.
+ */
+char *trace_names[PPCDBG_NUM_FLAGS] = {
+ /* Known debug names */
+ "mminit", "mm",
+ "syscall32", "syscall32_ni", "syscall32x", "syscall32m",
+ "syscall64", "syscall64_ni", "syscall64x",
+ "signal", "signal_xmon",
+ "binfmt32", "binfmt64", "binfmt_xmon", "binfmt_32addr",
+ "alignfixup", "tceinit", "tce", "phb_init",
+ "smp", "boot", "buswalk"
+};
+#else
+extern char *trace_names[64];
+#endif /* WANT_PPCDBG_TAB */
+
+#ifdef CONFIG_PPCDBG
+/* Macro to conditionally print debug based on debug_switch */
+#define PPCDBG(...) udbg_ppcdbg(__VA_ARGS__)
+
+/* Macro to conditionally call a debug routine based on debug_switch */
+#define PPCDBGCALL(FLAGS,FUNCTION) ifppcdebug(FLAGS) FUNCTION
+
+/* Macros to test for debug states */
+#define ifppcdebug(FLAGS) if (udbg_ifdebug(FLAGS))
+#define ppcdebugset(FLAGS) (udbg_ifdebug(FLAGS))
+/* Pick the binfmt flag matching the current task's address-space width. */
+#define PPCDBG_BINFMT (test_thread_flag(TIF_32BIT) ? PPCDBG_BINFMT32 : PPCDBG_BINFMT64)
+
+#ifdef CONFIG_XMON
+#define PPCDBG_ENTER_DEBUGGER() xmon(0)
+#define PPCDBG_ENTER_DEBUGGER_REGS(X) xmon(X)
+#endif
+
+#else
+/* CONFIG_PPCDBG off: all debug hooks compile away to no-ops. */
+#define PPCDBG(...) do {;} while (0)
+#define PPCDBGCALL(FLAGS,FUNCTION) do {;} while (0)
+#define ifppcdebug(...) if (0)
+#define ppcdebugset(FLAGS) (0)
+#endif /* CONFIG_PPCDBG */
+
+/* NOTE(review): only PPCDBG_ENTER_DEBUGGER gets a no-op fallback below;
+ PPCDBG_ENTER_DEBUGGER_REGS is undefined unless CONFIG_XMON is set --
+ confirm all of its callers are compiled under CONFIG_XMON. */
+#ifndef PPCDBG_ENTER_DEBUGGER
+#define PPCDBG_ENTER_DEBUGGER() do {;} while(0)
+#endif
+
+#endif /*__PPCDEBUG_H */
--- /dev/null
+#ifndef _PPC64_PROC_FS_H
+#define _PPC64_PROC_FS_H
+/*
+ * proc_fs.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Change Activity: */
+/* tgall -- merge of iSeries/iSeries_proc.h and proc_pmc.h */
+/* End Change Activity */
+
+#include <linux/proc_fs.h>
+
+/* /proc setup entry points provided by the arch code (see the merge
+ * note above: this header combines iSeries_proc.h and proc_pmc.h). */
+void pmc_proc_init(struct proc_dir_entry *iSeries_proc);
+void proc_ppc64_init(void);
+
+#include <asm/iSeries/iSeries_proc.h>
+
+#endif
--- /dev/null
+/*
+ * pmc_proc.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* Change Activity: */
+/* End Change Activity */
+
+#ifndef _PMC_PROC_H
+#define _PMC_PROC_H
+
+#include <linux/proc_fs.h>
+
+/* /proc setup entry points; these prototypes also appear in
+ * asm/proc_fs.h, which supersedes this header. */
+void pmc_proc_init(struct proc_dir_entry *iSeries_proc);
+void proc_ppc64_init(void);
+
+#endif /* _PMC_PROC_H */
+
--- /dev/null
+#ifndef __ASM_PPC64_PROCESSOR_H
+#define __ASM_PPC64_PROCESSOR_H
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/stringify.h>
+#ifndef __ASSEMBLY__
+#include <asm/atomic.h>
+#include <asm/ppcdebug.h>
+#include <asm/a.out.h>
+#endif
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Uses the GNU C statement-expression, local-label and
+ * label-as-value (&&) extensions.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/* Machine State Register (MSR) Fields.
+ * Note: some bit positions are shared by more than one name
+ * (POW/WE = 18, TGPR/CE = 17, BE/DE = 9) -- they are aliases.
+ */
+#define MSR_SF_LG 63 /* Enable 64 bit mode */
+#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
+#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_VEC_LG 25 /* Enable AltiVec */
+#define MSR_POW_LG 18 /* Enable Power Management */
+#define MSR_WE_LG 18 /* Wait State Enable */
+#define MSR_TGPR_LG 17 /* TLB Update registers in use */
+#define MSR_CE_LG 17 /* Critical Interrupt Enable */
+#define MSR_ILE_LG 16 /* Interrupt Little Endian */
+#define MSR_EE_LG 15 /* External Interrupt Enable */
+#define MSR_PR_LG 14 /* Problem State / Privilege Level */
+#define MSR_FP_LG 13 /* Floating Point enable */
+#define MSR_ME_LG 12 /* Machine Check Enable */
+#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
+#define MSR_SE_LG 10 /* Single Step */
+#define MSR_BE_LG 9 /* Branch Trace */
+#define MSR_DE_LG 9 /* Debug Exception Enable */
+#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
+#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG 5 /* Instruction Relocate */
+#define MSR_DR_LG 4 /* Data Relocate */
+#define MSR_PE_LG 3 /* Protection Enable */
+#define MSR_PX_LG 2 /* Protection Exclusive Mode */
+#define MSR_RI_LG 1 /* Recoverable Exception */
+#define MSR_LE_LG 0 /* Little Endian */
+
+/* In assembly the assembler does the arithmetic; in C force an
+ * unsigned long shift so bits above 31 (e.g. SF at 63) are valid. */
+#ifdef __ASSEMBLY__
+#define MASK(X) (1<<(X))
+#else
+#define MASK(X) (1UL<<(X))
+#endif
+
+#define MSR_SF MASK(MSR_SF_LG) /* Enable 64 bit mode */
+#define MSR_ISF MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
+#define MSR_HV MASK(MSR_HV_LG) /* Hypervisor state */
+#define MSR_VEC MASK(MSR_VEC_LG) /* Enable AltiVec */
+#define MSR_POW MASK(MSR_POW_LG) /* Enable Power Management */
+#define MSR_WE MASK(MSR_WE_LG) /* Wait State Enable */
+#define MSR_TGPR MASK(MSR_TGPR_LG)/* TLB Update registers in use */
+#define MSR_CE MASK(MSR_CE_LG) /* Critical Interrupt Enable */
+#define MSR_ILE MASK(MSR_ILE_LG) /* Interrupt Little Endian */
+#define MSR_EE MASK(MSR_EE_LG) /* External Interrupt Enable */
+#define MSR_PR MASK(MSR_PR_LG) /* Problem State / Privilege Level */
+#define MSR_FP MASK(MSR_FP_LG) /* Floating Point enable */
+#define MSR_ME MASK(MSR_ME_LG) /* Machine Check Enable */
+#define MSR_FE0 MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
+#define MSR_SE MASK(MSR_SE_LG) /* Single Step */
+#define MSR_BE MASK(MSR_BE_LG) /* Branch Trace */
+#define MSR_DE MASK(MSR_DE_LG) /* Debug Exception Enable */
+#define MSR_FE1 MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
+#define MSR_IP MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
+#define MSR_IR MASK(MSR_IR_LG) /* Instruction Relocate */
+#define MSR_DR MASK(MSR_DR_LG) /* Data Relocate */
+#define MSR_PE MASK(MSR_PE_LG) /* Protection Enable */
+#define MSR_PX MASK(MSR_PX_LG) /* Protection Exclusive Mode */
+#define MSR_RI MASK(MSR_RI_LG) /* Recoverable Exception */
+#define MSR_LE MASK(MSR_LE_LG) /* Little Endian */
+
+/* NOTE(review): the composite masks below expand without surrounding
+ parentheses, so expressions like ~MSR_ or (MSR_KERNEL & x) are
+ precedence-sensitive -- verify all use sites. */
+#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
+#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
+
+#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
+#define MSR_USER64 MSR_USER32 | MSR_SF
+
+/* Floating Point Status and Control Register (FPSCR) Fields */
+
+#define FPSCR_FX 0x80000000 /* FPU exception summary */
+#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
+#define FPSCR_VX 0x20000000 /* Invalid operation summary */
+#define FPSCR_OX 0x10000000 /* Overflow exception summary */
+#define FPSCR_UX 0x08000000 /* Underflow exception summary */
+#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
+#define FPSCR_XX 0x02000000 /* Inexact exception summary */
+#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
+#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
+#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
+#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
+#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
+#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
+#define FPSCR_FR 0x00040000 /* Fraction rounded */
+#define FPSCR_FI 0x00020000 /* Fraction inexact */
+#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
+#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
+#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
+#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
+#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
+#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
+#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
+#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
+#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
+#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
+#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
+#define FPSCR_RN 0x00000003 /* FPU rounding control */
+
+/* Special Purpose Registers (SPRNs).
+ * Note: several SPR numbers below are shared by more than one name
+ * (e.g. 0x3F2 is both DBCR and IABR); they belong to different
+ * CPU families. */
+
+#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */
+#define SPRN_CTR 0x009 /* Count Register */
+#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
+#define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */
+#define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */
+#define SPRN_DAR 0x013 /* Data Address Register */
+#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
+#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
+#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
+#define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */
+#define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */
+#define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */
+#define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */
+#define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */
+#define SPRN_DBCR 0x3F2 /* Debug Control Register */
+#define DBCR_EDM 0x80000000
+#define DBCR_IDM 0x40000000
+#define DBCR_RST(x) (((x) & 0x3) << 28)
+#define DBCR_RST_NONE 0
+#define DBCR_RST_CORE 1
+#define DBCR_RST_CHIP 2
+#define DBCR_RST_SYSTEM 3
+#define DBCR_IC 0x08000000 /* Instruction Completion Debug Event */
+#define DBCR_BT 0x04000000 /* Branch Taken Debug Event */
+#define DBCR_EDE 0x02000000 /* Exception Debug Event */
+#define DBCR_TDE 0x01000000 /* TRAP Debug Event */
+#define DBCR_FER 0x00F80000 /* First Events Remaining Mask */
+#define DBCR_FT 0x00040000 /* Freeze Timers on Debug Event */
+#define DBCR_IA1 0x00020000 /* Instr. Addr. Compare 1 Enable */
+#define DBCR_IA2 0x00010000 /* Instr. Addr. Compare 2 Enable */
+#define DBCR_D1R 0x00008000 /* Data Addr. Compare 1 Read Enable */
+#define DBCR_D1W 0x00004000 /* Data Addr. Compare 1 Write Enable */
+#define DBCR_D1S(x) (((x) & 0x3) << 12) /* Data Addr. Compare 1 Size */
+#define DAC_BYTE 0
+#define DAC_HALF 1
+#define DAC_WORD 2
+#define DAC_QUAD 3
+#define DBCR_D2R 0x00000800 /* Data Addr. Compare 2 Read Enable */
+#define DBCR_D2W 0x00000400 /* Data Addr. Compare 2 Write Enable */
+#define DBCR_D2S(x) (((x) & 0x3) << 8) /* Data Addr. Compare 2 Size */
+#define DBCR_SBT 0x00000040 /* Second Branch Taken Debug Event */
+#define DBCR_SED 0x00000020 /* Second Exception Debug Event */
+#define DBCR_STD 0x00000010 /* Second Trap Debug Event */
+#define DBCR_SIA 0x00000008 /* Second IAC Enable */
+#define DBCR_SDA 0x00000004 /* Second DAC Enable */
+#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
+#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
+#define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */
+#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */
+#define SPRN_DBSR 0x3F0 /* Debug Status Register */
+#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
+#define DCCR_NOCACHE 0 /* Noncacheable */
+#define DCCR_CACHE 1 /* Cacheable */
+#define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */
+#define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */
+#define DCWR_COPY 0 /* Copy-back */
+#define DCWR_WRITE 1 /* Write-through */
+#define SPRN_DEAR 0x3D5 /* Data Error Address Register */
+#define SPRN_DEC 0x016 /* Decrement Register */
+#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
+#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+#define SPRN_EAR 0x11A /* External Address Register */
+#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */
+#define ESR_IMCP 0x80000000 /* Instr. Machine Check - Protection */
+#define ESR_IMCN 0x40000000 /* Instr. Machine Check - Non-config */
+#define ESR_IMCB 0x20000000 /* Instr. Machine Check - Bus error */
+#define ESR_IMCT 0x10000000 /* Instr. Machine Check - Timeout */
+#define ESR_PIL 0x08000000 /* Program Exception - Illegal */
+#define ESR_PPR 0x04000000 /* Program Exception - Privileged */
+#define ESR_PTR 0x02000000 /* Program Exception - Trap */
+#define ESR_DST 0x00800000 /* Storage Exception - Data miss */
+#define ESR_DIZ 0x00400000 /* Storage Exception - Zone fault */
+#define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */
+#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
+#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
+#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
+#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
+#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
+#define HID0_SBCLK (1<<27)
+#define HID0_EICE (1<<26)
+#define HID0_ECLK (1<<25)
+#define HID0_PAR (1<<24)
+#define HID0_DOZE (1<<23)
+#define HID0_NAP (1<<22)
+#define HID0_SLEEP (1<<21)
+#define HID0_DPM (1<<20)
+#define HID0_ICE (1<<15) /* Instruction Cache Enable */
+#define HID0_DCE (1<<14) /* Data Cache Enable */
+#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
+#define HID0_DLOCK (1<<12) /* Data Cache Lock */
+#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
+#define HID0_DCI (1<<10) /* Data Cache Invalidate */
+#define HID0_SPD (1<<9) /* Speculative disable */
+#define HID0_SGE (1<<7) /* Store Gathering Enable */
+#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] (shares bit 7 with SGE) */
+#define HID0_BTIC (1<<5) /* Branch Target Instruction Cache Enable */
+#define HID0_ABE (1<<3) /* Address Broadcast Enable */
+#define HID0_BHTE (1<<2) /* Branch History Table Enable */
+#define HID0_BTCD (1<<1) /* Branch target cache disable */
+#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
+#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
+#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
+#define SPRN_TSC 0x3FD /* Thread switch control */
+#define SPRN_TST 0x3FC /* Thread switch timeout */
+#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
+#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
+#define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */
+#define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */
+#define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */
+#define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */
+#define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */
+#define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */
+#define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */
+#define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */
+#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define ICCR_NOCACHE 0 /* Noncacheable */
+#define ICCR_CACHE 1 /* Cacheable */
+#define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */
+#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */
+#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */
+#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
+#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
+#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
+#define SPRN_LR 0x008 /* Link Register */
+#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
+#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
+#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */
+#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */
+#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */
+#define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */
+#define SPRN_PID 0x3B1 /* Process ID */
+#define SPRN_PIR 0x3FF /* Processor Identification Register */
+#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
+#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
+#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
+#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
+#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
+#define SPRN_PVR 0x11F /* Processor Version Register */
+#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
+#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
+#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
+#define SPRN_SGR 0x3B9 /* Storage Guarded Register */
+#define SGR_NORMAL 0
+#define SGR_GUARDED 1
+#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
+#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
+#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
+#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
+#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
+#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
+#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+#define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */
+#define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */
+#define SPRN_TBHI 0x3DC /* Time Base High */
+#define SPRN_TBHU 0x3CC /* Time Base High User-mode */
+#define SPRN_TBLO 0x3DD /* Time Base Low */
+#define SPRN_TBLU 0x3CD /* Time Base Low User-mode */
+#define SPRN_TBRL 0x10D /* Time Base Read Lower Register */
+#define SPRN_TBRU 0x10C /* Time Base Read Upper Register */
+#define SPRN_TBWL 0x11D /* Time Base Write Lower Register */
+#define SPRN_TBWU 0x11C /* Time Base Write Upper Register */
+#define SPRN_TCR 0x3DA /* Timer Control Register */
+#define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */
+#define WP_2_17 0 /* 2^17 clocks */
+#define WP_2_21 1 /* 2^21 clocks */
+#define WP_2_25 2 /* 2^25 clocks */
+#define WP_2_29 3 /* 2^29 clocks */
+#define TCR_WRC(x) (((x)&0x3)<<28) /* WDT Reset Control */
+#define WRC_NONE 0 /* No reset will occur */
+#define WRC_CORE 1 /* Core reset will occur */
+#define WRC_CHIP 2 /* Chip reset will occur */
+#define WRC_SYSTEM 3 /* System reset will occur */
+#define TCR_WIE 0x08000000 /* WDT Interrupt Enable */
+#define TCR_PIE 0x04000000 /* PIT Interrupt Enable */
+#define TCR_FP(x) (((x)&0x3)<<24) /* FIT Period */
+#define FP_2_9 0 /* 2^9 clocks */
+#define FP_2_13 1 /* 2^13 clocks */
+#define FP_2_17 2 /* 2^17 clocks */
+#define FP_2_21 3 /* 2^21 clocks */
+#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
+#define TCR_ARE 0x00400000 /* Auto Reload Enable */
+#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
+#define THRM1_TIN (1<<0)
+#define THRM1_TIV (1<<1)
+#define THRM1_THRES (0x7f<<2)
+#define THRM1_TID (1<<29)
+#define THRM1_TIE (1<<30)
+#define THRM1_V (1<<31)
+#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */
+#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */
+#define THRM3_E (1<<31)
+#define SPRN_TSR 0x3D8 /* Timer Status Register */
+#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
+#define TSR_WIS 0x40000000 /* WDT Interrupt Status */
+#define TSR_WRS(x) (((x)&0x3)<<28) /* WDT Reset Status */
+#define WRS_NONE 0 /* No WDT reset occurred */
+#define WRS_CORE 1 /* WDT forced core reset */
+#define WRS_CHIP 2 /* WDT forced chip reset */
+#define WRS_SYSTEM 3 /* WDT forced system reset */
+#define TSR_PIS 0x08000000 /* PIT Interrupt Status */
+#define TSR_FIS 0x04000000 /* FIT Interrupt Status */
+#define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */
+#define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */
+#define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */
+#define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */
+#define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */
+#define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */
+#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
+#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+#define SPRN_ZPR 0x3B0 /* Zone Protection Register */
+
+/* Short-hand versions for a number of the above SPRNs */
+
+#define CTR SPRN_CTR /* Counter Register */
+#define DAR SPRN_DAR /* Data Address Register */
+#define DABR SPRN_DABR /* Data Address Breakpoint Register */
+#define DBAT0L SPRN_DBAT0L /* Data BAT 0 Lower Register */
+#define DBAT0U SPRN_DBAT0U /* Data BAT 0 Upper Register */
+#define DBAT1L SPRN_DBAT1L /* Data BAT 1 Lower Register */
+#define DBAT1U SPRN_DBAT1U /* Data BAT 1 Upper Register */
+#define DBAT2L SPRN_DBAT2L /* Data BAT 2 Lower Register */
+#define DBAT2U SPRN_DBAT2U /* Data BAT 2 Upper Register */
+#define DBAT3L SPRN_DBAT3L /* Data BAT 3 Lower Register */
+#define DBAT3U SPRN_DBAT3U /* Data BAT 3 Upper Register */
+#define DCMP SPRN_DCMP /* Data TLB Compare Register */
+#define DEC SPRN_DEC /* Decrement Register */
+#define DMISS SPRN_DMISS /* Data TLB Miss Register */
+#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
+#define EAR SPRN_EAR /* External Address Register */
+#define HASH1 SPRN_HASH1 /* Primary Hash Address Register */
+#define HASH2 SPRN_HASH2 /* Secondary Hash Address Register */
+#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
+#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
+#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
+#define TSC SPRN_TSC /* Thread switch control */
+#define TST SPRN_TST /* Thread switch timeout */
+#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
+#define IBAT0L SPRN_IBAT0L /* Instruction BAT 0 Lower Register */
+#define IBAT0U SPRN_IBAT0U /* Instruction BAT 0 Upper Register */
+#define IBAT1L SPRN_IBAT1L /* Instruction BAT 1 Lower Register */
+#define IBAT1U SPRN_IBAT1U /* Instruction BAT 1 Upper Register */
+#define IBAT2L SPRN_IBAT2L /* Instruction BAT 2 Lower Register */
+#define IBAT2U SPRN_IBAT2U /* Instruction BAT 2 Upper Register */
+#define IBAT3L SPRN_IBAT3L /* Instruction BAT 3 Lower Register */
+#define IBAT3U SPRN_IBAT3U /* Instruction BAT 3 Upper Register */
+#define ICMP SPRN_ICMP /* Instruction TLB Compare Register */
+#define IMISS SPRN_IMISS /* Instruction TLB Miss Register */
+#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */
+#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
+#define LR SPRN_LR /* Link Register */
+#define PVR SPRN_PVR /* Processor Version */
+#define PIR SPRN_PIR /* Processor ID */
+#define RPA SPRN_RPA /* Required Physical Address Register */
+#define SDR1 SPRN_SDR1 /* MMU hash base register */
+#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
+#define SPR1 SPRN_SPRG1
+#define SPR2 SPRN_SPRG2
+#define SPR3 SPRN_SPRG3
+#define SPRG0 SPRN_SPRG0
+#define SPRG1 SPRN_SPRG1
+#define SPRG2 SPRN_SPRG2
+#define SPRG3 SPRN_SPRG3
+#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
+#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
+#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
+#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
+#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
+#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
+#define ICTC 1019 /* = SPRN_ICTC (0x3FB) in decimal */
+#define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */
+#define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */
+#define THRM3 SPRN_THRM3 /* Thermal Management Register 3 */
+#define XER SPRN_XER /* Fixed Point Exception Register */
+
+
+/* Device Control Registers (DCRs), accessed via mfdcr/mtdcr. */
+
+#define DCRN_BEAR 0x090 /* Bus Error Address Register */
+#define DCRN_BESR 0x091 /* Bus Error Syndrome Register */
+#define BESR_DSES 0x80000000 /* Data-Side Error Status */
+#define BESR_DMES 0x40000000 /* DMA Error Status */
+#define BESR_RWS 0x20000000 /* Read/Write Status */
+#define BESR_ETMASK 0x1C000000 /* Error Type */
+#define ET_PROT 0
+#define ET_PARITY 1
+#define ET_NCFG 2
+#define ET_BUSERR 4
+#define ET_BUSTO 6
+#define DCRN_DMACC0 0x0C4 /* DMA Chained Count Register 0 */
+#define DCRN_DMACC1 0x0CC /* DMA Chained Count Register 1 */
+#define DCRN_DMACC2 0x0D4 /* DMA Chained Count Register 2 */
+#define DCRN_DMACC3 0x0DC /* DMA Chained Count Register 3 */
+#define DCRN_DMACR0 0x0C0 /* DMA Channel Control Register 0 */
+#define DCRN_DMACR1 0x0C8 /* DMA Channel Control Register 1 */
+#define DCRN_DMACR2 0x0D0 /* DMA Channel Control Register 2 */
+#define DCRN_DMACR3 0x0D8 /* DMA Channel Control Register 3 */
+#define DCRN_DMACT0 0x0C1 /* DMA Count Register 0 */
+#define DCRN_DMACT1 0x0C9 /* DMA Count Register 1 */
+#define DCRN_DMACT2 0x0D1 /* DMA Count Register 2 */
+#define DCRN_DMACT3 0x0D9 /* DMA Count Register 3 */
+#define DCRN_DMADA0 0x0C2 /* DMA Destination Address Register 0 */
+#define DCRN_DMADA1 0x0CA /* DMA Destination Address Register 1 */
+#define DCRN_DMADA2 0x0D2 /* DMA Destination Address Register 2 */
+#define DCRN_DMADA3 0x0DA /* DMA Destination Address Register 3 */
+#define DCRN_DMASA0 0x0C3 /* DMA Source Address Register 0 */
+#define DCRN_DMASA1 0x0CB /* DMA Source Address Register 1 */
+#define DCRN_DMASA2 0x0D3 /* DMA Source Address Register 2 */
+#define DCRN_DMASA3 0x0DB /* DMA Source Address Register 3 */
+#define DCRN_DMASR 0x0E0 /* DMA Status Register */
+#define DCRN_EXIER 0x042 /* External Interrupt Enable Register */
+#define EXIER_CIE 0x80000000 /* Critical Interrupt Enable */
+#define EXIER_SRIE 0x08000000 /* Serial Port Rx Int. Enable */
+#define EXIER_STIE 0x04000000 /* Serial Port Tx Int. Enable */
+#define EXIER_JRIE 0x02000000 /* JTAG Serial Port Rx Int. Enable */
+#define EXIER_JTIE 0x01000000 /* JTAG Serial Port Tx Int. Enable */
+#define EXIER_D0IE 0x00800000 /* DMA Channel 0 Interrupt Enable */
+#define EXIER_D1IE 0x00400000 /* DMA Channel 1 Interrupt Enable */
+#define EXIER_D2IE 0x00200000 /* DMA Channel 2 Interrupt Enable */
+#define EXIER_D3IE 0x00100000 /* DMA Channel 3 Interrupt Enable */
+#define EXIER_E0IE 0x00000010 /* External Interrupt 0 Enable */
+#define EXIER_E1IE 0x00000008 /* External Interrupt 1 Enable */
+#define EXIER_E2IE 0x00000004 /* External Interrupt 2 Enable */
+#define EXIER_E3IE 0x00000002 /* External Interrupt 3 Enable */
+#define EXIER_E4IE 0x00000001 /* External Interrupt 4 Enable */
+#define DCRN_EXISR 0x040 /* External Interrupt Status Register */
+#define DCRN_IOCR 0x0A0 /* Input/Output Configuration Register */
+#define IOCR_E0TE 0x80000000
+#define IOCR_E0LP 0x40000000
+#define IOCR_E1TE 0x20000000
+#define IOCR_E1LP 0x10000000
+#define IOCR_E2TE 0x08000000
+#define IOCR_E2LP 0x04000000
+#define IOCR_E3TE 0x02000000
+#define IOCR_E3LP 0x01000000
+#define IOCR_E4TE 0x00800000
+#define IOCR_E4LP 0x00400000
+#define IOCR_EDT 0x00080000
+#define IOCR_SOR 0x00040000
+#define IOCR_EDO 0x00008000
+#define IOCR_2XC 0x00004000
+#define IOCR_ATC 0x00002000
+#define IOCR_SPD 0x00001000
+#define IOCR_BEM 0x00000800
+#define IOCR_PTD 0x00000400
+#define IOCR_ARE 0x00000080
+#define IOCR_DRC 0x00000020
+#define IOCR_RDM(x) (((x) & 0x3) << 3)
+#define IOCR_TCS 0x00000004
+#define IOCR_SCS 0x00000002
+#define IOCR_SPC 0x00000001
+
+
+/* Processor Version Register */
+
+/* Processor Version Register (PVR) field extraction */
+
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
+
+/* Processor Version Numbers (values of the PVR_VER field) */
+#define PV_PULSAR 0x0034
+#define PV_POWER4 0x0035
+#define PV_ICESTAR 0x0036
+#define PV_SSTAR 0x0037
+#define PV_630 0x0040
+#define PV_630p 0x0041
+
+/* Platforms supported by PPC64. _machine is actually a set of flags:
+ * a hardware-family bit optionally combined with the LPAR bit. */
+#define _MACH_pSeriesHW 0x00010000
+#define _MACH_iSeriesHW 0x00020000
+#define _MACH_LPAR 0x00000001
+
+#define _MACH_unknown 0x00000000
+#define _MACH_pSeries (_MACH_pSeriesHW)
+#define _MACH_pSeriesLP (_MACH_pSeriesHW | _MACH_LPAR)
+#define _MACH_iSeries (_MACH_iSeriesHW | _MACH_LPAR)
+
+/* Compat defines for drivers */
+#define _MACH_Pmac 0xf0000000 /* bogus value */
+
+/*
+ * List of interrupt controllers.
+ */
+#define IC_INVALID 0
+#define IC_OPEN_PIC 1
+#define IC_PPC_XIC 2
+
+/* Two-level paste so macro arguments are expanded before ## joins them */
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+/*
+ * Beginning of traceback info work for asm functions.
+ * TB_* are single-bit (or masked) flags OR-ed into the 64-bit
+ * traceback word emitted by _TRACEBACK below.
+ */
+#define TB_ASM 0x000C000000000000
+#define TB_GLOBALLINK 0x0000800000000000
+#define TB_IS_EPROL 0x0000400000000000
+#define TB_HAS_TBOFF 0x0000200000000000
+#define TB_INT_PROC 0x0000100000000000
+#define TB_HAS_CTL 0x0000080000000000
+#define TB_TOCLESS 0x0000040000000000
+#define TB_FP_PRESENT 0x0000020000000000
+#define TB_LOG_ABORT 0x0000010000000000
+#define TB_INT_HNDL 0x0000008000000000
+#define TB_NAME_PRESENT 0x0000004000000000
+#define TB_SAVES_CR 0x0000000200000000
+#define TB_SAVES_LR 0x0000000100000000
+#define TB_STORES_BC 0x0000000080000000
+#define TB_PARMINFO 0x000000000000FFFF
+/* Default traceback word for asm functions; parenthesized so the macro
+ * expands safely inside any larger expression (standard macro hygiene --
+ * the current .llong use in _TRACEBACK is unaffected). */
+#define TB_DEFAULT (TB_ASM | TB_HAS_TBOFF | TB_NAME_PRESENT)
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Define a global function with its ELF function descriptor.
+ * "name" goes in .opd and holds three doublewords: the code entry
+ * point ".name", the TOC base (.TOC.@tocbase), and a zero; the code
+ * itself is emitted at the ".name" label in .text.
+ */
+#define _GLOBAL(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+/* Same as _GLOBAL but without the .globl directives (file-local symbol) */
+#define _STATIC(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+/*
+ * Emit a traceback table after an asm function: a zero word, the
+ * TB_DEFAULT flag word, the offset back to the function start, and
+ * the length-prefixed function name.
+ */
+#define _TRACEBACK(NAME) \
+GLUE(.LT,NAME): ;\
+ .long 0 ;\
+ .llong TB_DEFAULT ;\
+ .long GLUE(.LT,NAME)-GLUE(.,NAME) ;\
+ .short GLUE(GLUE(.LT,NAME),_procname_end)-GLUE(GLUE(.LT,NAME),_procname_start) ;\
+GLUE(GLUE(.LT,NAME),_procname_start): ;\
+ .ascii __stringify(NAME) ;\
+GLUE(GLUE(.LT,NAME),_procname_end):
+
+#endif /* __ASSEMBLY__ */
+
+
+/* Macros for setting and retrieving special purpose registers */
+
+#define mfmsr() ({unsigned long rval; \
+ asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+
+#define mtmsrd(v) asm volatile("mtmsrd %0" : : "r" (v))
+
+/* rn must be a compile-time SPR number: it is stringified into the opcode */
+#define mfspr(rn) ({unsigned long rval; \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
+
+/* Read the timebase register */
+#define mftb() ({unsigned long rval; \
+ asm volatile("mftb %0" : "=r" (rval)); rval;})
+
+/* iSeries CTRL register (for runlatch) */
+
+#define CTRLT 0x098
+#define CTRLF 0x088
+#define RUNLATCH 0x0001
+
+/* Macros for adjusting thread priority (hardware multi-threading).
+ * "or N,N,N" forms are architected priority hints, not real ORs. */
+#define HMT_low() asm volatile("or 1,1,1")
+#define HMT_medium() asm volatile("or 2,2,2")
+#define HMT_high() asm volatile("or 3,3,3")
+
+/* Size of an exception stack frame contained in the paca. */
+#define EXC_FRAME_SIZE 64
+
+/* Read the Address Space Register */
+#define mfasr() ({unsigned long rval; \
+ asm volatile("mfasr %0" : "=r" (rval)); rval;})
+
+#ifndef __ASSEMBLY__
+extern int _machine;
+extern int have_of;
+
+struct task_struct;
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void release_thread(struct task_struct *);
+
+/*
+ * Create a new kernel thread.
+ */
+extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+/*
+ * Bus types
+ */
+#define EISA_bus 0
+#define EISA_bus__is_a_macro /* for versions in ksyms.c */
+#define MCA_bus 0
+#define MCA_bus__is_a_macro /* for versions in ksyms.c */
+
+/* Lazy FPU handling on uni-processor */
+extern struct task_struct *last_task_used_math;
+
+
+#ifdef __KERNEL__
+/* 64-bit user address space is 41-bits (2TBs user VM) */
+#define TASK_SIZE_USER64 (0x0000020000000000UL)
+
+/*
+ * 32-bit user address space is 4GB - 1 page
+ * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
+
+/* Per-task: 32-bit tasks get the smaller address space limit */
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_USER32 : TASK_SIZE_USER64)
+#endif /* __KERNEL__ */
+
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE_USER32 (STACK_TOP_USER32 / 4)
+#define TASK_UNMAPPED_BASE_USER64 (STACK_TOP_USER64 / 4)
+
+#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
+ TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+ unsigned long ksp; /* Kernel stack pointer */
+ struct pt_regs *regs; /* Pointer to saved register state */
+ mm_segment_t fs; /* for get_fs() validation */
+ void *pgdir; /* root of page-table tree */
+ signed long last_syscall;
+ double fpr[32]; /* Complete floating point set */
+ unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */
+ unsigned long fpscr; /* Floating point status */
+};
+
+/* Top of the initial kernel stack (stack grows down from here) */
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+
+/* Positional initializer -- must stay in thread_struct field order */
+#define INIT_THREAD { \
+ INIT_SP, /* ksp */ \
+ (struct pt_regs *)INIT_SP - 1, /* regs */ \
+ KERNEL_DS, /*fs*/ \
+ swapper_pg_dir, /* pgdir */ \
+ 0, /* last_syscall */ \
+ {0}, 0, 0 \
+}
+
+/*
+ * Note: the vm_start and vm_end fields here should *not*
+ * be in kernel space. (Could vm_end == vm_start perhaps?)
+ */
+#define IOREMAP_MMAP { &ioremap_mm, 0, 0x1000, NULL, \
+ PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, \
+ 1, NULL, NULL }
+
+extern struct mm_struct ioremap_mm;
+
+/*
+ * Return saved PC of a blocked thread. For now, this is the "user" PC
+ * (0 if no register state has been saved for the thread).
+ */
+static inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return (t->regs) ? t->regs->nip : 0;
+}
+
+/* No segment bookkeeping needed on this architecture */
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+
+/* Busy-wait hint: a no-op here */
+#define cpu_relax() do { } while (0)
+
+/*
+ * Prefetch macros.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+/* Data cache block touch: hint that x will soon be read */
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+}
+
+/* Data cache block touch for store: hint that x will soon be written */
+extern inline void prefetchw(const void *x)
+{
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PPC64_PROCESSOR_H */
--- /dev/null
+#ifndef _PPC64_PROM_H
+#define _PPC64_PROM_H
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Translate pointers by "offset" while running before relocation;
+ * "offset" must be in scope at each use site. */
+#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
+#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
+#define RELOC(x) (*PTRRELOC(&(x)))
+
+/* Split a 64-bit value into its low/high 32-bit halves */
+#define LONG_LSW(X) (((unsigned long)X) & 0xffffffff)
+#define LONG_MSW(X) (((unsigned long)X) >> 32)
+
+typedef u32 phandle;
+typedef void *ihandle;
+typedef u32 phandle32;
+typedef u32 ihandle32;
+
+extern char *prom_display_paths[];
+extern unsigned int prom_num_displays;
+
+/* One entry of a device's "reg"-style address list */
+struct address_range {
+ unsigned long space;
+ unsigned long address;
+ unsigned long size;
+};
+
+struct interrupt_info {
+ int line;
+ int sense; /* +ve/-ve logic, edge or level, etc. */
+};
+
+/* OF PCI address: three 32-bit cells (phys.hi/mid/lo) */
+struct pci_address {
+ u32 a_hi;
+ u32 a_mid;
+ u32 a_lo;
+};
+
+struct pci_range32 {
+ struct pci_address child_addr;
+ unsigned int parent_addr;
+ unsigned long size;
+};
+
+struct pci_range64 {
+ struct pci_address child_addr;
+ unsigned long parent_addr;
+ unsigned long size;
+};
+
+/* A "ranges" entry in either 32-bit or 64-bit parent-address form */
+union pci_range {
+ struct {
+ struct pci_address addr;
+ u32 phys;
+ u32 size_hi;
+ } pci32;
+ struct {
+ struct pci_address addr;
+ u32 phys_hi;
+ u32 phys_lo;
+ u32 size_hi;
+ u32 size_lo;
+ } pci64;
+};
+
+struct _of_tce_table {
+ phandle node;
+ unsigned long base;
+ unsigned long size;
+};
+
+struct reg_property {
+ unsigned long address;
+ unsigned long size;
+};
+
+struct reg_property32 {
+ unsigned int address;
+ unsigned int size;
+};
+
+struct reg_property64 {
+ unsigned long address;
+ unsigned long size;
+};
+
+struct translation_property {
+ unsigned long virt;
+ unsigned long size;
+ unsigned long phys;
+ unsigned int flags;
+};
+
+/* One name/value property, linked per device node */
+struct property {
+ char *name;
+ int length;
+ unsigned char *value;
+ struct property *next;
+};
+
+/* NOTE: the device_node contains PCI specific info for pci devices.
+ * This perhaps could be hung off the device_node with another struct,
+ * but for now it is directly in the node. The phb ptr is a good
+ * indication of a real PCI node. Other nodes leave these fields zeroed.
+ */
+struct pci_controller;
+struct TceTable;
+struct device_node {
+ char *name;
+ char *type;
+ phandle node;
+ int n_addrs;
+ struct address_range *addrs;
+ int n_intrs;
+ struct interrupt_info *intrs;
+ char *full_name;
+ int busno; /* for pci devices */
+ int devfn; /* for pci devices */
+ struct pci_controller *phb; /* for pci devices */
+ int status; /* current status of device */
+ struct TceTable *tce_table; /* for phb's or bridges */
+#define DN_STATUS_BIST_FAILED (1<<0)
+ struct property *properties;
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ struct device_node *next; /* next device of same type */
+ struct device_node *allnext; /* next in list of all nodes */
+};
+
+typedef u32 prom_arg_t;
+
+/* Argument buffer for an Open Firmware client-interface call */
+struct prom_args {
+ u32 service;
+ u32 nargs;
+ u32 nret;
+ prom_arg_t args[10];
+ prom_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+/* Callback slots (32-bit function addresses) supplied by yaboot for
+ * early debugging output */
+typedef struct {
+ u32 printf; /* void (*printf)(char *, ...); */
+ u32 memdump; /* void (*memdump)(unsigned char *, unsigned long); */
+ u32 dummy; /* void (*dummy)(void); */
+} yaboot_debug_t;
+
+/* Global state for talking to Open Firmware during early boot */
+struct prom_t {
+ unsigned long entry;
+ ihandle chosen;
+ int cpu;
+ ihandle stdout;
+ ihandle disp_node;
+ struct prom_args args;
+ unsigned long version;
+ unsigned long encode_phys_size;
+ struct bi_record *bi_recs;
+#ifdef DEBUG_YABOOT
+ yaboot_debug_t *yaboot;
+#endif
+};
+
+extern struct prom_t prom;
+
+/* Prototypes */
+extern void abort(void);
+extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, yaboot_debug_t *);
+extern void prom_print(const char *msg);
+extern void relocate_nodes(void);
+extern void finish_device_tree(void);
+extern struct device_node *find_devices(const char *name);
+extern struct device_node *find_type_devices(const char *type);
+extern struct device_node *find_path_device(const char *path);
+extern struct device_node *find_compatible_devices(const char *type,
+ const char *compat);
+extern struct device_node *find_pci_device_OFnode(unsigned char bus,
+ unsigned char dev_fn);
+extern struct device_node *find_all_nodes(void);
+extern int device_is_compatible(struct device_node *device, const char *);
+extern int machine_is_compatible(const char *compat);
+extern unsigned char *get_property(struct device_node *node, const char *name,
+ int *lenp);
+extern void print_properties(struct device_node *node);
+extern int prom_n_addr_cells(struct device_node* np);
+extern int prom_n_size_cells(struct device_node* np);
+extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
+extern void prom_drawstring(const char *c);
+extern void prom_drawhex(unsigned long v);
+extern void prom_drawchar(char c);
+
+#endif /* _PPC64_PROM_H */
--- /dev/null
+#ifndef _PPC64_PTRACE_H
+#define _PPC64_PTRACE_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * this should only contain volatile regs
+ * since we can keep non-volatile in the thread_struct
+ * should set this up when only volatiles are saved
+ * by intr code.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to insure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below. This simplifies arch/ppc/kernel/ptrace.c.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+#define PPC_REG unsigned long
+/* 64-bit register frame; field order must match the PT_* offsets below */
+struct pt_regs {
+ PPC_REG gpr[32];
+ PPC_REG nip;
+ PPC_REG msr;
+ PPC_REG orig_gpr3; /* Used for restarting system calls */
+ PPC_REG ctr;
+ PPC_REG link;
+ PPC_REG xer;
+ PPC_REG ccr;
+ PPC_REG softe; /* Soft enabled/disabled */
+ PPC_REG trap; /* Reason for being here */
+ PPC_REG dar; /* Fault registers */
+ PPC_REG dsisr;
+ PPC_REG result; /* Result of a system call */
+};
+
+#define PPC_REG_32 unsigned int
+/* 32-bit layout of the same frame, for 32-bit user processes */
+struct pt_regs32 {
+ PPC_REG_32 gpr[32];
+ PPC_REG_32 nip;
+ PPC_REG_32 msr;
+ PPC_REG_32 orig_gpr3; /* Used for restarting system calls */
+ PPC_REG_32 ctr;
+ PPC_REG_32 link;
+ PPC_REG_32 xer;
+ PPC_REG_32 ccr;
+ PPC_REG_32 mq; /* 601 only (not used at present) */
+ /* Used on APUS to hold IPL value. */
+ PPC_REG_32 trap; /* Reason for being here */
+ PPC_REG_32 dar; /* Fault registers */
+ PPC_REG_32 dsisr;
+ PPC_REG_32 result; /* Result of a system call */
+};
+
+#endif
+
+#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+
+/* Size of stack frame allocated when calling signal handler. */
+/* FIXME: What should this be on 64-bit kernel (64 for 32-bit) */
+#define __SIGNAL_FRAMESIZE 64
+#define __SIGNAL_FRAMESIZE32 64
+
+#define instruction_pointer(regs) ((regs)->nip)
+/* Nonzero iff the saved MSR has the problem-state (user mode) bit set */
+#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ */
+#define PT_R0 0
+#define PT_R1 1
+#define PT_R2 2
+#define PT_R3 3
+#define PT_R4 4
+#define PT_R5 5
+#define PT_R6 6
+#define PT_R7 7
+#define PT_R8 8
+#define PT_R9 9
+#define PT_R10 10
+#define PT_R11 11
+#define PT_R12 12
+#define PT_R13 13
+#define PT_R14 14
+#define PT_R15 15
+#define PT_R16 16
+#define PT_R17 17
+#define PT_R18 18
+#define PT_R19 19
+#define PT_R20 20
+#define PT_R21 21
+#define PT_R22 22
+#define PT_R23 23
+#define PT_R24 24
+#define PT_R25 25
+#define PT_R26 26
+#define PT_R27 27
+#define PT_R28 28
+#define PT_R29 29
+#define PT_R30 30
+#define PT_R31 31
+
+#define PT_NIP 32
+#define PT_MSR 33
+#ifdef __KERNEL__
+#define PT_ORIG_R3 34
+#endif
+#define PT_CTR 35
+#define PT_LNK 36
+#define PT_XER 37
+#define PT_CCR 38
+#define PT_SOFTE 39
+/* NOTE(review): offsets 40-42 are skipped here -- presumably reserved
+ * (pt_regs has trap/dar/dsisr between softe and result); confirm
+ * against the ptrace user ABI before reusing them. */
+#define PT_RESULT 43
+
+#define PT_FPR0 48
+#ifdef __KERNEL__
+#define PT_FPSCR (PT_FPR0 + 32 + 1) /* each FP reg occupies 1 slot in this space */
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* To the 32-bit user - each FP reg occupies 2 slots in this space */
+#else
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 slots in this space -- Fix when 64-bit apps. */
+#endif
+
+/* Additional PTRACE requests implemented on PowerPC. */
+#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
+#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
+#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
+#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
+#define PPC_PTRACE_PEEKTEXT_3264 0x95 /* Read word at location ADDR on a 64-bit process from a 32-bit process. */
+#define PPC_PTRACE_PEEKDATA_3264 0x94 /* Read word at location ADDR on a 64-bit process from a 32-bit process. */
+#define PPC_PTRACE_POKETEXT_3264 0x93 /* Write word at location ADDR on a 64-bit process from a 32-bit process. */
+#define PPC_PTRACE_POKEDATA_3264 0x92 /* Write word at location ADDR on a 64-bit process from a 32-bit process. */
+#define PPC_PTRACE_PEEKUSR_3264 0x91 /* Read a register (specified by ADDR) out of the "user area" on a 64-bit process from a 32-bit process. */
+#define PPC_PTRACE_POKEUSR_3264 0x90 /* Write DATA into location ADDR within the "user area" on a 64-bit process from a 32-bit process. */
+
+
+#endif /* _PPC64_PTRACE_H */
--- /dev/null
+#ifndef _PPC64_RESOURCE_H
+#define _PPC64_RESOURCE_H
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Resource-limit indices for getrlimit/setrlimit */
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#define RLIMIT_AS 9 /* address space limit(?) */
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
+
+#define RLIM_NLIMITS 11
+
+#ifdef __KERNEL__
+
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
+
+/* Initial {current, max} pairs, indexed in RLIMIT_* order above */
+#define INIT_RLIMITS \
+{ \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { _STK_LIM, RLIM_INFINITY }, \
+ { 0, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { 0, 0 }, \
+ { INR_OPEN, INR_OPEN }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _PPC64_RESOURCE_H */
--- /dev/null
+#ifndef _PPC64_RTAS_H
+#define _PPC64_RTAS_H
+
+#include <linux/spinlock.h>
+
+/*
+ * Definitions for talking to the RTAS on CHRP machines.
+ *
+ * Copyright (C) 2001 Peter Bergner
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define RTAS_UNKNOWN_SERVICE (-1)
+/*
+ * In general to call RTAS use rtas_token("string") to lookup
+ * an RTAS token for the given string (e.g. "event-scan").
+ * To actually perform the call use
+ * ret = rtas_call(token, n_in, n_out, ...)
+ * Where n_in is the number of input parameters and
+ * n_out is the number of output parameters
+ *
+ * If the "string" is invalid on this system, RTAS_UNKNOWN_SERVICE
+ * will be returned as a token. rtas_call() does look for this
+ * token and error out gracefully so rtas_call(rtas_token("str"), ...)
+ * may be safely used for one-shot calls to RTAS.
+ *
+ */
+
+typedef u32 rtas_arg_t;
+
+/* Argument buffer passed to enter_rtas() */
+struct rtas_args {
+ u32 token;
+ u32 nargs;
+ u32 nret;
+ rtas_arg_t args[16];
+ spinlock_t lock;
+ rtas_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+struct rtas_t {
+ unsigned long entry; /* physical address pointer */
+ unsigned long base; /* physical address pointer */
+ unsigned long size;
+ spinlock_t lock;
+
+ struct device_node *dev; /* virtual address pointer */
+};
+
+/* Event classes */
+#define INTERNAL_ERROR 0x80000000 /* set bit 0 */
+#define EPOW_WARNING 0x40000000 /* set bit 1 */
+#define POWERMGM_EVENTS 0x20000000 /* set bit 2 */
+#define HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
+#define EVENT_SCAN_ALL_EVENTS 0xf0000000
+
+/* event-scan returns */
+#define SEVERITY_FATAL 0x5
+#define SEVERITY_ERROR 0x4
+#define SEVERITY_ERROR_SYNC 0x3
+#define SEVERITY_WARNING 0x2
+#define SEVERITY_EVENT 0x1
+#define SEVERITY_NO_ERROR 0x0
+#define DISP_FULLY_RECOVERED 0x0
+#define DISP_LIMITED_RECOVERY 0x1
+#define DISP_NOT_RECOVERED 0x2
+#define PART_PRESENT 0x0
+#define PART_NOT_PRESENT 0x1
+#define INITIATOR_UNKNOWN 0x0
+#define INITIATOR_CPU 0x1
+#define INITIATOR_PCI 0x2
+#define INITIATOR_ISA 0x3
+#define INITIATOR_MEMORY 0x4
+#define INITIATOR_POWERMGM 0x5
+#define TARGET_UNKNOWN 0x0
+#define TARGET_CPU 0x1
+#define TARGET_PCI 0x2
+#define TARGET_ISA 0x3
+#define TARGET_MEMORY 0x4
+#define TARGET_POWERMGM 0x5
+#define TYPE_RETRY 0x01
+#define TYPE_TCE_ERR 0x02
+#define TYPE_INTERN_DEV_FAIL 0x03
+#define TYPE_TIMEOUT 0x04
+#define TYPE_DATA_PARITY 0x05
+#define TYPE_ADDR_PARITY 0x06
+#define TYPE_CACHE_PARITY 0x07
+#define TYPE_ADDR_INVALID 0x08
+#define TYPE_ECC_UNCORR 0x09
+#define TYPE_ECC_CORR 0x0a
+#define TYPE_EPOW 0x40
+/* I don't add PowerMGM events right now, this is a different topic */
+#define TYPE_PMGM_POWER_SW_ON 0x60
+#define TYPE_PMGM_POWER_SW_OFF 0x61
+#define TYPE_PMGM_LID_OPEN 0x62
+#define TYPE_PMGM_LID_CLOSE 0x63
+#define TYPE_PMGM_SLEEP_BTN 0x64
+#define TYPE_PMGM_WAKE_BTN 0x65
+#define TYPE_PMGM_BATTERY_WARN 0x66
+#define TYPE_PMGM_BATTERY_CRIT 0x67
+#define TYPE_PMGM_SWITCH_TO_BAT 0x68
+#define TYPE_PMGM_SWITCH_TO_AC 0x69
+#define TYPE_PMGM_KBD_OR_MOUSE 0x6a
+#define TYPE_PMGM_ENCLOS_OPEN 0x6b
+#define TYPE_PMGM_ENCLOS_CLOSED 0x6c
+#define TYPE_PMGM_RING_INDICATE 0x6d
+#define TYPE_PMGM_LAN_ATTENTION 0x6e
+#define TYPE_PMGM_TIME_ALARM 0x6f
+#define TYPE_PMGM_CONFIG_CHANGE 0x70
+#define TYPE_PMGM_SERVICE_PROC 0x71
+
+/* Fixed-layout RTAS error log header; bit-field widths define the
+ * on-the-wire format and must not be changed */
+struct rtas_error_log {
+ unsigned long version:8; /* Architectural version */
+ unsigned long severity:3; /* Severity level of error */
+ unsigned long disposition:2; /* Degree of recovery */
+ unsigned long extended:1; /* extended log present? */
+ unsigned long /* reserved */ :2; /* Reserved for future use */
+ unsigned long initiator:4; /* Initiator of event */
+ unsigned long target:4; /* Target of failed operation */
+ unsigned long type:8; /* General event or error*/
+ unsigned long extended_log_length:32; /* length in bytes */
+ unsigned char buffer[1]; /* allocated by klimit bump */
+};
+
+extern struct rtas_t rtas;
+
+extern void enter_rtas(struct rtas_args *);
+extern int rtas_token(const char *service);
+extern long rtas_call(int token, int, int, unsigned long *, ...);
+extern void phys_call_rtas(int, int, int, ...);
+extern void phys_call_rtas_display_status(char);
+extern void call_rtas_display_status(char);
+extern void rtas_restart(char *cmd);
+extern void rtas_power_off(void);
+extern void rtas_halt(void);
+
+#endif /* _PPC64_RTAS_H */
--- /dev/null
+/*
+ * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
+ * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC64_RWSEM_H
+#define _PPC64_RWSEM_H
+
+#ifdef __KERNEL__
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * the semaphore definition
+ *
+ * count encodes both halves: the low 16 bits hold the number of
+ * active lockers, the high bits go negative while waiters exist
+ * (see the BIAS constants below).
+ */
+struct rw_semaphore {
+ /* XXX this should be able to be an atomic_t -- paulus */
+ signed int count;
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#if RWSEM_DEBUG
+ int debug;
+#endif
+};
+
+/*
+ * initialisation
+ */
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT , 0
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+/* Slow paths, implemented in lib/rwsem.c */
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+ sem->debug = 0;
+#endif
+}
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ /* non-negative result => no writer active or waiting; barrier on
+ * the success path, slow path otherwise */
+ if (atomic_inc_return((atomic_t *)(&sem->count)) >= 0)
+ smp_wmb();
+ else
+ rwsem_down_read_failed(sem);
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ /* exact match means we were the only contender */
+ tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count));
+ if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
+ smp_wmb();
+ else
+ rwsem_down_write_failed(sem);
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ smp_wmb();
+ tmp = atomic_dec_return((atomic_t *)(&sem->count));
+ /* wake only when we were the last active locker and waiters exist */
+ if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ smp_wmb();
+ if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+ atomic_add(delta, (atomic_t *)(&sem->count));
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+{
+ smp_mb();
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
+}
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_RWSEM_H */
--- /dev/null
+#ifndef _PPC64_SCATTERLIST_H
+#define _PPC64_SCATTERLIST_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <asm/dma.h>
+
+/* One element of a DMA scatter/gather list */
+struct scatterlist {
+ struct page *page;
+ unsigned int offset;
+ unsigned int length;
+
+ /* For TCE support */
+ u32 dma_address;
+ u32 dma_length;
+};
+
+/* No ISA DMA addressing restriction on this platform */
+#define ISA_DMA_THRESHOLD (~0UL)
+
+#endif /* !(_PPC64_SCATTERLIST_H) */
--- /dev/null
+#ifndef __PPC64_SEGMENT_H
+#define __PPC64_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif /* __PPC64_SEGMENT_H */
--- /dev/null
+#ifndef _PPC64_SEMAPHORE_H
+#define _PPC64_SEMAPHORE_H
+
+/*
+ * Remove spinlock-based RW semaphores; RW semaphore definitions are
+ * now in rwsem.h and we use the generic lib/rwsem.c implementation.
+ * Rework semaphores to use atomic_dec_if_positive.
+ * -- Paul Mackerras (paulus@samba.org)
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+struct semaphore {
+ /*
+ * Note that any negative value of count is equivalent to 0,
+ * but additionally indicates that some process(es) might be
+ * sleeping on `wait'.
+ */
+ atomic_t count;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) \
+ , (long)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
+#define __SEMAPHORE_INITIALIZER(name, count) \
+ { ATOMIC_INIT(count), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name, 1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
+
+/* Runtime initialisation to an arbitrary count */
+static inline void sema_init (struct semaphore *sem, int val)
+{
+ atomic_set(&sem->count, val);
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+#endif
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+/* Slow paths, implemented elsewhere */
+extern void __down(struct semaphore * sem);
+extern int __down_interruptible(struct semaphore * sem);
+extern void __up(struct semaphore * sem);
+
+extern inline void down(struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ /*
+ * Try to get the semaphore, take the slow path if we fail.
+ */
+ if (atomic_dec_return(&sem->count) < 0)
+ __down(sem);
+ smp_wmb();
+}
+
+/* Like down() but returns nonzero if interrupted by a signal */
+extern inline int down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_interruptible(sem);
+ smp_wmb();
+ return ret;
+}
+
+/* Non-blocking acquire: returns nonzero if the semaphore was NOT taken */
+extern inline int down_trylock(struct semaphore * sem)
+{
+ int ret;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ ret = atomic_dec_if_positive(&sem->count) < 0;
+ smp_wmb();
+ return ret;
+}
+
+extern inline void up(struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ smp_wmb();
+ /* result <= 0 means somebody is (or may be) sleeping on wait */
+ if (atomic_inc_return(&sem->count) <= 0)
+ __up(sem);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* !(_PPC64_SEMAPHORE_H) */
--- /dev/null
+#ifndef _PPC64_SEMBUF_H
+#define _PPC64_SEMBUF_H
+
+/*
+ * The semid64_ds structure for PPC architecture.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+/* User-visible ABI: layout (including the explicit padding noted in
+ * the header comment above) must not change. */
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _PPC64_SEMBUF_H */
--- /dev/null
+/*
+ * include/asm-ppc64/serial.h
+ *
+ * (Fixed: the header comment previously named the 32-bit asm-ppc path,
+ * and the file lacked a multiple-inclusion guard.)
+ */
+#ifndef _PPC64_SERIAL_H
+#define _PPC64_SERIAL_H
+
+#include <linux/config.h>
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define RS_TABLE_SIZE 64
+#else
+#define RS_TABLE_SIZE 4
+#endif
+
+/* Standard COM flags (except for COM4, because of the 8514 problem) */
+#ifdef CONFIG_SERIAL_DETECT_IRQ
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
+#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#else
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#endif
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define FOURPORT_FLAGS ASYNC_FOURPORT
+#define ACCENT_FLAGS 0
+#define BOCA_FLAGS 0
+#define HUB6_FLAGS 0
+#endif
+
+/*
+ * The following define the access methods for the HUB6 card. All
+ * access is through two ports for all 24 possible chips. The card is
+ * selected through the high 2 bits, the port on that card with the
+ * "middle" 3 bits, and the register on that port with the bottom
+ * 3 bits.
+ *
+ * While the access port and interrupt is configurable, the default
+ * port locations are 0x302 for the port control register, and 0x303
+ * for the data read/write register. Normally, the interrupt is at irq3
+ * but can be anything from 3 to 7 inclusive. Note that using 3 will
+ * require disabling com2.
+ */
+
+#define C_P(card,port) (((card)<<6|(port)<<3) + 1)
+
+#define STD_SERIAL_PORT_DEFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
+ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
+ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
+ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
+
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define EXTRA_SERIAL_PORT_DEFNS \
+ { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
+ { 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \
+ { 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \
+ { 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS }, /* ttyS7 */ \
+ { 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS }, /* ttyS8 */ \
+ { 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS }, /* ttyS9 */ \
+ { 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS }, /* ttyS10 */ \
+ { 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \
+ { 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \
+ { 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
+ { 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \
+ { 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \
+ { 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \
+ { 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS }, /* ttyS19 */ \
+ { 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS }, /* ttyS20 */ \
+ { 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS }, /* ttyS21 */ \
+ { 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS }, /* ttyS22 */ \
+ { 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS }, /* ttyS23 */ \
+ { 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS }, /* ttyS24 */ \
+ { 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS }, /* ttyS25 */ \
+ { 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS }, /* ttyS26 */ \
+ { 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS }, /* ttyS27 */ \
+ { 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS }, /* ttyS28 */ \
+ { 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \
+ { 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \
+ { 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */
+#else
+#define EXTRA_SERIAL_PORT_DEFNS
+#endif
+
+/* You can have up to four HUB6's in the system, but I've only
+ * included two cards here for a total of twelve ports.
+ */
+#if (defined(CONFIG_HUB6) && defined(CONFIG_SERIAL_MANY_PORTS))
+#define HUB6_SERIAL_PORT_DFNS \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,0) }, /* ttyS32 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,1) }, /* ttyS33 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,2) }, /* ttyS34 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,3) }, /* ttyS35 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,4) }, /* ttyS36 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,5) }, /* ttyS37 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,0) }, /* ttyS38 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,1) }, /* ttyS39 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,2) }, /* ttyS40 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,3) }, /* ttyS41 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,4) }, /* ttyS42 */ \
+ { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,5) }, /* ttyS43 */
+#else
+#define HUB6_SERIAL_PORT_DFNS
+#endif
+
+#define MCA_SERIAL_PORT_DFNS
+
+#define SERIAL_PORT_DFNS \
+ STD_SERIAL_PORT_DEFNS \
+ EXTRA_SERIAL_PORT_DEFNS \
+ HUB6_SERIAL_PORT_DFNS \
+ MCA_SERIAL_PORT_DFNS
+
+#endif /* _PPC64_SERIAL_H */
--- /dev/null
+#ifndef _PPC64_SETUP_H
+#define _PPC64_SETUP_H
+
+/* This is a place holder include */
+/* (guard renamed from _PPC_SETUP_H for consistency with the other
+ * _PPC64_* headers and to avoid shadowing the 32-bit header's guard) */
+
+#endif /* _PPC64_SETUP_H */
--- /dev/null
+#ifndef _PPC64_SHMBUF_H
+#define _PPC64_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for PPC64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* User-visible ABI: the explicit __unused fields keep the layout
+ * fixed across kernel/user space (see header comment above). */
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+/* System-wide SysV shared-memory limits (presumably returned via
+ * shmctl(IPC_INFO) -- confirm against the ipc syscall code). */
+struct shminfo64 {
+ unsigned long shmmax; /* max segment size (bytes) */
+ unsigned long shmmin; /* min segment size (bytes) */
+ unsigned long shmmni; /* max number of segments */
+ unsigned long shmseg; /* max segments per process */
+ unsigned long shmall; /* max total shared memory (pages) */
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _PPC64_SHMBUF_H */
--- /dev/null
+#ifndef _PPC64_SHMPARAM_H
+#define _PPC64_SHMPARAM_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Alignment required for shmat() attach addresses; plain PAGE_SIZE
+ * here, i.e. no extra cache-colouring constraint on this port. */
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _PPC64_SHMPARAM_H */
--- /dev/null
+#ifndef _ASM_PPC64_SIGCONTEXT_H
+#define _ASM_PPC64_SIGCONTEXT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ptrace.h>
+
+/* Machine context made visible to userspace on signal delivery;
+ * layout is ABI. */
+struct sigcontext_struct {
+ unsigned long _unused[4];
+ int signal;
+ unsigned long handler;
+ unsigned long oldmask;
+ struct pt_regs *regs; /* register state -- see <asm/ptrace.h> */
+};
+
+#endif /* _ASM_PPC64_SIGCONTEXT_H */
--- /dev/null
+#ifndef _PPC64_SIGINFO_H
+#define _PPC64_SIGINFO_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+/* Value delivered with a POSIX.1b (realtime) signal. */
+typedef union sigval {
+ int sival_int;
+ void *sival_ptr;
+} sigval_t;
+
+/* siginfo_t is padded to SI_MAX_SIZE bytes; the three leading ints
+ * precede the union, hence the "- 3". */
+#define SI_MAX_SIZE 128
+#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 3)
+
+/* Signal payload; user-visible ABI, union padded out to SI_PAD_SIZE
+ * ints.  The member in use is selected by si_signo/si_code. */
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ unsigned int _timer1;
+ unsigned int _timer2;
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void *_addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
+
+#ifdef __KERNEL__
+/* Kernel-internal si_code values carry the union discriminator in the
+ * top 16 bits; presumably stripped before delivery to userspace (see
+ * copy_siginfo_to_user below) -- the userspace values are plain. */
+#define __SI_MASK 0xffff0000
+#define __SI_KILL (0 << 16)
+#define __SI_TIMER (1 << 16)
+#define __SI_POLL (2 << 16)
+#define __SI_FAULT (3 << 16)
+#define __SI_CHLD (4 << 16)
+#define __SI_RT (5 << 16)
+#define __SI_CODE(T,N) ((T) << 16 | ((N) & 0xffff))
+#else
+#define __SI_KILL 0
+#define __SI_TIMER 0
+#define __SI_POLL 0
+#define __SI_FAULT 0
+#define __SI_CHLD 0
+#define __SI_RT 0
+#define __SI_CODE(T,N) (N)
+#endif
+
+/*
+ * si_code values
+ * Digital reserves positive values for kernel-generated signals.
+ */
+#define SI_USER 0 /* sent by kill, sigsend, raise */
+#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define SI_QUEUE -1 /* sent by sigqueue */
+#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
+#define SI_MESGQ -3 /* sent by real time message queue state change */
+#define SI_ASYNCIO -4 /* sent by AIO completion */
+#define SI_SIGIO -5 /* sent by queued SIGIO */
+#define SI_TKILL -6 /* sent by tkill system call */
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
+#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
+#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
+#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
+#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
+#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
+#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
+#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
+#define NSIGILL 8
+
+/*
+ * SIGFPE si_codes
+ */
+#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
+#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
+#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
+#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
+#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
+#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
+#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
+#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
+#define NSIGFPE 8
+
+/*
+ * SIGSEGV si_codes
+ */
+#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
+#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
+#define NSIGSEGV 2
+
+/*
+ * SIGBUS si_codes
+ */
+#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
+#define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */
+#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
+#define NSIGBUS 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
+#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
+#define NSIGTRAP 2
+
+/*
+ * SIGCHLD si_codes
+ */
+#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
+#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
+#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
+#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
+#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
+#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
+#define NSIGCHLD 6
+
+/*
+ * SIGPOLL si_codes
+ */
+#define POLL_IN (__SI_POLL|1) /* data input available */
+#define POLL_OUT (__SI_POLL|2) /* output buffers available */
+#define POLL_MSG (__SI_POLL|3) /* input message available */
+#define POLL_ERR (__SI_POLL|4) /* i/o error */
+#define POLL_PRI (__SI_POLL|5) /* high priority input available */
+#define POLL_HUP (__SI_POLL|6) /* device disconnected */
+#define NSIGPOLL 6
+
+/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
+ * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
+ * thread manager then catches and does the appropriate nonsense.
+ * However, everything is written out here so as to not get lost.
+ */
+#define SIGEV_SIGNAL 0 /* notify via signal */
+#define SIGEV_NONE 1 /* other notification: meaningless */
+#define SIGEV_THREAD 2 /* deliver via thread creation */
+
+/* sigevent is padded to SIGEV_MAX_SIZE bytes; the sigval_t plus two
+ * ints precede the union, hence the "- 3". */
+#define SIGEV_MAX_SIZE 64
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[SIGEV_PAD_SIZE];
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute; /* really pthread_attr_t */
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+
+/*
+ * Copy a siginfo.  A negative si_code means it originated from
+ * userspace (cf. SI_FROMUSER above), where any union member may be in
+ * use, so copy everything; otherwise copy only the three-int header
+ * plus the largest union member the kernel fills in.
+ */
+extern inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(siginfo_t));
+ else
+ /* _sigchld is currently the largest known union member */
+ memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
+}
+
+extern int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from);
+
+#endif /* __KERNEL__ */
+
+#endif /* _PPC64_SIGINFO_H */
--- /dev/null
+#ifndef _ASMPPC64_SIGNAL_H
+#define _ASMPPC64_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+/* 64 signals fit in a single 64-bit word, so _NSIG_WORDS is 1. */
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+/* Signal numbers (classic Linux numbering; 32..(_NSIG-1) is the
+ * POSIX.1b realtime range). */
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+
+
+
+
+
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+/* sa_flags bits (user-visible ABI; semantics in the block comment
+ * above). */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#define SA_SHIRQ 0x04000000
+#endif
+
+/* sigprocmask() `how' values. */
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+/* Old-style sigaction with the old_sigset_t mask -- presumably kept
+ * for the legacy sigaction syscall ABI. */
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+/* Kernel-internal wrapper around struct sigaction. */
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+typedef struct sigaltstack {
+ void *ss_sp; /* stack base */
+ int ss_flags; /* SS_ONSTACK / SS_DISABLE */
+ size_t ss_size; /* stack size in bytes */
+} stack_t;
+
+#endif /* _ASMPPC64_SIGNAL_H */
--- /dev/null
+/*
+ * smp.h: PPC64 specific SMP code.
+ *
+ * Original was a copy of sparc smp.h. Now heavily modified
+ * for PPC.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+#ifndef _PPC64_SMP_H
+#define _PPC64_SMP_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_SMP
+
+#ifndef __ASSEMBLY__
+
+#include <asm/Paca.h>
+
+extern unsigned long cpu_online_map;
+
+/* IPI and cpu-bringup helpers implemented by the platform SMP code. */
+extern void smp_message_pass(int target, int msg, unsigned long data, int wait);
+extern void smp_store_cpu_info(int id);
+extern void smp_send_tlb_invalidate(int);
+extern void smp_send_xmon_break(int cpu);
+struct pt_regs;
+extern void smp_message_recv(int, struct pt_regs *);
+extern void smp_send_reschedule_all(void);
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+/* 1 to 1 mapping on PPC -- Cort */
+#define cpu_logical_map(cpu) (cpu)
+#define cpu_number_map(x) (x)
+extern volatile unsigned long cpu_callin_map[NR_CPUS];
+
+/* Processor ids come from this cpu's paca: xPacaIndex is the logical
+ * id, xHwProcNum the hardware processor number. */
+#define smp_processor_id() (get_paca()->xPacaIndex)
+#define hard_smp_processor_id() (get_paca()->xHwProcNum)
+#define get_hard_smp_processor_id(CPU) (xPaca[(CPU)].xHwProcNum)
+
+
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
+#define PPC_MSG_MIGRATE_TASK 2
+#define PPC_MSG_XMON_BREAK 3
+
+/* Platform-specific SMP bringup entry points. */
+void smp_init_iSeries(void);
+void smp_init_pSeries(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* !(CONFIG_SMP) */
+#endif /* !(_PPC64_SMP_H) */
+#endif /* __KERNEL__ */
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation (the "big kernel lock"; recursive
+ * per task via current->lock_depth)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/* Include guard added: without it, double inclusion redefines the
+ * static inline functions below and breaks the build. */
+#ifndef __ASM_PPC64_SMPLOCK_H
+#define __ASM_PPC64_SMPLOCK_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+#define kernel_locked() spin_is_locked(&kernel_flag)
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) { \
+ spin_unlock(&kernel_flag); \
+ if (global_irq_holder == (cpu)) \
+ BUG(); \
+ } \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+static __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+static __inline__ void unlock_kernel(void)
+{
+ if (current->lock_depth < 0)
+ BUG();
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
+
+#endif /* __ASM_PPC64_SMPLOCK_H */
--- /dev/null
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_RCVLOWAT 16
+#define SO_SNDLOWAT 17
+#define SO_RCVTIMEO 18
+#define SO_SNDTIMEO 19
+#define SO_PASSCRED 20
+#define SO_PEERCRED 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+
+/* Nasty libc5 fixup - bletch */
+#if defined(__KERNEL__)
+/* Socket types. */
+#define SOCK_STREAM 1 /* stream (connection) socket */
+#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
+#define SOCK_RAW 3 /* raw socket */
+#define SOCK_RDM 4 /* reliably-delivered message */
+#define SOCK_SEQPACKET 5 /* sequential packet socket */
+#define SOCK_PACKET 10 /* linux specific way of */
+ /* getting packets at the dev */
+ /* level. For writing rarp and */
+ /* other similar things on the */
+ /* user level. */
+#define SOCK_MAX (SOCK_PACKET+1)
+#endif
+
+#endif /* _ASM_SOCKET_H */
--- /dev/null
+#ifndef _ASM_PPC64_SOCKIOS_H
+#define _ASM_PPC64_SOCKIOS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901 /* set SIGIO/SIGURG recipient */
+#define SIOCSPGRP 0x8902 /* set socket's process group */
+#define FIOGETOWN 0x8903 /* get SIGIO/SIGURG recipient */
+#define SIOCGPGRP 0x8904 /* get socket's process group */
+#define SIOCATMARK 0x8905 /* at out-of-band mark? */
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+#endif /* _ASM_PPC64_SOCKIOS_H */
--- /dev/null
+#ifndef __ASM_SOFTIRQ_H
+#define __ASM_SOFTIRQ_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+
+
+/* Per-cpu bottom-half disable nesting count; barrier() keeps the
+ * compiler from migrating code across the critical-section boundary. */
+#define local_bh_disable() do { local_bh_count(smp_processor_id())++; barrier(); } while (0)
+#define __local_bh_enable() do { barrier(); local_bh_count(smp_processor_id())--; } while (0)
+
+/* Re-enable bottom halves; when the nesting count reaches zero, run
+ * any softirqs that became pending while they were disabled. */
+#define local_bh_enable() \
+do { \
+ if (!--local_bh_count(smp_processor_id()) \
+ && softirq_pending(smp_processor_id())) { \
+ do_softirq(); \
+ } \
+} while (0)
+
+
+#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
+
+#endif /* __ASM_SOFTIRQ_H */
--- /dev/null
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+/*
+ * Simple spin lock operations.
+ *
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * Type of int is used as a full 64b word is not necessary.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/* Lock word: 0 = free, nonzero = held.  32 bits suffice (see header
+ * comment above). */
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+#ifdef __KERNEL__
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_is_locked(x) ((x)->lock != 0)
+
+/*
+ * One-shot acquisition attempt via a lwarx/stwcx. reservation.
+ * Returns 1 on success (isync then orders the critical section after
+ * the acquire), 0 if the lock word was already nonzero.
+ */
+static __inline__ int _raw_spin_trylock(spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # spin_trylock\n\
+ cmpwi 0,%0,0\n\
+ li %0,0\n\
+ bne- 2f\n\
+ li %0,1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b\n\
+ isync\n\
+2:" : "=&r"(tmp)
+ : "r"(&lock->lock)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+/*
+ * Spin until the lock is acquired.  While the lock is observed held,
+ * we busy-wait with plain loads at lowered hardware thread priority
+ * (the "or 1,1,1" / "or 2,2,2" priority nops) before retrying the
+ * lwarx/stwcx. sequence; isync provides the acquire ordering.
+ */
+static __inline__ void _raw_spin_lock(spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ "b 2f # spin_lock\n\
+1: or 1,1,1 # spin at low priority\n\
+ lwzx %0,0,%1\n\
+ cmpwi 0,%0,0\n\
+ bne+ 1b\n\
+ or 2,2,2 # back to medium priority\n\
+2: lwarx %0,0,%1\n\
+ cmpwi 0,%0,0\n\
+ bne- 1b\n\
+ stwcx. %2,0,%1\n\
+ bne- 2b\n\
+ isync"
+ : "=&r"(tmp)
+ : "r"(&lock->lock), "r"(1)
+ : "cr0", "memory");
+}
+
+/* Release: eieio orders the critical section's stores before the
+ * plain store that clears the lock word. */
+static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+{
+ __asm__ __volatile__("eieio # spin_unlock": : :"memory");
+ lock->lock = 0;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+/* Lock word: 0 = free, >0 = number of readers, -1 = write-locked. */
+typedef struct {
+ volatile signed int lock;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+
+/*
+ * One-shot reader acquisition: sign-extend and increment the lock
+ * word; a non-positive result means a writer holds it (-1), so fail.
+ * Returns 1 on success, 0 otherwise.
+ */
+static __inline__ int _raw_read_trylock(rwlock_t *rw)
+{
+ unsigned int tmp;
+ unsigned int ret;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # read_trylock\n\
+ li %1,0\n\
+ extsw %0,%0\n\
+ addic. %0,%0,1\n\
+ ble- 2f\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b\n\
+ li %1,1\n\
+ isync\n\
+2:" : "=&r"(tmp), "=&r"(ret)
+ : "r"(&rw->lock)
+ : "cr0", "memory");
+
+ return ret;
+}
+
+/*
+ * Take a read lock, spinning at low hardware thread priority while a
+ * writer (negative lock word) is seen, then atomically incrementing
+ * the reader count with lwarx/stwcx.; isync gives acquire ordering.
+ */
+static __inline__ void _raw_read_lock(rwlock_t *rw)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ "b 2f # read_lock\n\
+1: or 1,1,1 # spin at low priority\n\
+ lwax %0,0,%1\n\
+ cmpwi 0,%0,0\n\
+ blt+ 1b\n\
+ or 2,2,2 # back to medium priority\n\
+2: lwarx %0,0,%1\n\
+ extsw %0,%0\n\
+ addic. %0,%0,1\n\
+ ble- 1b\n\
+ stwcx. %0,0,%1\n\
+ bne- 2b\n\
+ isync"
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "memory");
+}
+
+/* Drop a read lock: eieio for release ordering, then atomically
+ * decrement the reader count. */
+static __inline__ void _raw_read_unlock(rwlock_t *rw)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ "eieio # read_unlock\n\
+1: lwarx %0,0,%1\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "memory");
+}
+
+/*
+ * One-shot writer acquisition: only succeeds when the lock word is 0
+ * (no readers, no writer), storing -1.  Returns 1 on success.
+ */
+static __inline__ int _raw_write_trylock(rwlock_t *rw)
+{
+ unsigned int tmp;
+ unsigned int ret;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # write_trylock\n\
+ cmpwi 0,%0,0\n\
+ li %1,0\n\
+ bne- 2f\n\
+ stwcx. %3,0,%2\n\
+ bne- 1b\n\
+ li %1,1\n\
+ isync\n\
+2:" : "=&r"(tmp), "=&r"(ret)
+ : "r"(&rw->lock), "r"(-1)
+ : "cr0", "memory");
+
+ return ret;
+}
+
+/*
+ * Take the write lock: spin at low hardware thread priority until the
+ * lock word is observed 0, then atomically swing it to -1 with
+ * lwarx/stwcx.; isync gives acquire ordering.
+ */
+static __inline__ void _raw_write_lock(rwlock_t *rw)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ "b 2f # write_lock\n\
+1: or 1,1,1 # spin at low priority\n\
+ lwax %0,0,%1\n\
+ cmpwi 0,%0,0\n\
+ bne+ 1b\n\
+ or 2,2,2 # back to medium priority\n\
+2: lwarx %0,0,%1\n\
+ cmpwi 0,%0,0\n\
+ bne- 1b\n\
+ stwcx. %2,0,%1\n\
+ bne- 2b\n\
+ isync"
+ : "=&r"(tmp)
+ : "r"(&rw->lock), "r"(-1)
+ : "cr0", "memory");
+}
+
+/* Release the write lock: eieio for release ordering, then clear. */
+static __inline__ void _raw_write_unlock(rwlock_t *rw)
+{
+ __asm__ __volatile__("eieio # write_unlock": : :"memory");
+ rw->lock = 0;
+}
+
+/* Nonzero while at least one reader holds the lock (lock word > 0). */
+static __inline__ int is_read_locked(rwlock_t *rw)
+{
+ return rw->lock > 0;
+}
+
+/* Nonzero while a writer holds the lock (lock word is -1). */
+static __inline__ int is_write_locked(rwlock_t *rw)
+{
+ return rw->lock < 0;
+}
+
+/* Run-time initializers, plus a busy-wait for the lock to drop. */
+#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+
+#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SPINLOCK_H */
--- /dev/null
+#ifndef _PPC64_STAT_H
+#define _PPC64_STAT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+/* Ancient stat layout -- presumably kept only for the oldest stat
+ * syscall variant; note the 16-bit dev/ino fields. */
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+/* Native struct stat for 64-bit processes (user-visible ABI; the
+ * __unused slots pad out each timestamp). */
+struct stat {
+ dev_t st_dev;
+ ino_t st_ino;
+ mode_t st_mode;
+ nlink_t st_nlink;
+ uid_t st_uid;
+ gid_t st_gid;
+ dev_t st_rdev;
+ off_t st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1. */
+struct stat64 {
+ unsigned long st_dev; /* Device. */
+ unsigned long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long st_rdev; /* Device number, if device. */
+ unsigned short __pad2;
+ long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+
+ long st_blocks; /* Number 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int __unused1;
+ int st_mtime; /* Time of last modification. */
+ unsigned int __unused2;
+ int st_ctime; /* Time of last status change. */
+ unsigned int __unused3;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+#endif /* _PPC64_STAT_H */
--- /dev/null
+#ifndef _PPC64_STATFS_H
+#define _PPC64_STATFS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __KERNEL_STRICT_NAMES
+#include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
+/* Filesystem statistics returned by statfs(2)/fstatfs(2). */
+struct statfs {
+ long f_type; /* filesystem magic number */
+ long f_bsize; /* optimal transfer block size */
+ long f_blocks; /* total data blocks */
+ long f_bfree; /* free blocks */
+ long f_bavail; /* free blocks available to unprivileged users */
+ long f_files; /* total inodes */
+ long f_ffree; /* free inodes */
+ __kernel_fsid_t f_fsid; /* filesystem id */
+ long f_namelen; /* maximum filename length */
+ long f_spare[6]; /* reserved for future expansion */
+};
+
+#endif /* _PPC64_STATFS_H */
--- /dev/null
+#ifndef _PPC64_STRING_H_
+#define _PPC64_STRING_H_
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRNCPY
+#define __HAVE_ARCH_STRLEN
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRCAT
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_BCOPY
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_MEMCHR
+
+extern int strcasecmp(const char *, const char *);
+extern int strncasecmp(const char *, const char *, int);
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern int strcmp(const char *,const char *);
+extern char * strcat(char *, const char *);
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern int memcmp(const void *,const void *,__kernel_size_t);
+extern void * memchr(const void *,int,__kernel_size_t);
+
+#endif /* _PPC64_STRING_H_ */
--- /dev/null
+#ifndef __PPC64_SYSTEM_H
+#define __PPC64_SYSTEM_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kdev_t.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/hw_irq.h>
+#include <asm/memory.h>
+
+/*
+ * System defines.
+ */
+#define KERNEL_START_PHYS 0x800000
+#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
+#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x00000)
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ *
+ * wmb can use the eieio instruction and rmb can use lwsync (which
+ * orders loads), but since neither provides full ordering between
+ * all loads and stores, mb has to use the stronger but slower sync
+ * instruction.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() __asm__ __volatile__("": : :"memory")
+#define smp_rmb() __asm__ __volatile__("": : :"memory")
+#define smp_wmb() __asm__ __volatile__("": : :"memory")
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_XMON
+extern void xmon_irq(int, void *, struct pt_regs *);
+extern void xmon(struct pt_regs *excp);
+#endif
+
+extern void print_backtrace(unsigned long *);
+extern void show_regs(struct pt_regs * regs);
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+extern int _get_PVR(void);
+extern long _get_L2CR(void);
+extern void _set_L2CR(unsigned long);
+extern void via_cuda_init(void);
+extern void pmac_nvram_init(void);
+extern void pmac_find_display(void);
+extern void giveup_fpu(struct task_struct *);
+extern void enable_kernel_fp(void);
+extern void giveup_altivec(struct task_struct *);
+extern void load_up_altivec(struct task_struct *);
+extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
+extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern int abs(int);
+extern void cacheable_memzero(void *p, unsigned int nb);
+
+struct device_node;
+
+struct task_struct;
+#define prepare_to_switch() do { } while(0)
+#define switch_to(prev,next) _switch_to((prev),(next))
+extern void _switch_to(struct task_struct *, struct task_struct *);
+
+struct thread_struct;
+extern void _switch(struct thread_struct *prev, struct thread_struct *next);
+
+struct pt_regs;
+extern void dump_regs(struct pt_regs *);
+
+#ifndef CONFIG_SMP
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(flags) __save_flags(flags)
+#define restore_flags(flags) __restore_flags(flags)
+#define save_and_cli(flags) __save_and_cli(flags)
+
+#else /* CONFIG_SMP */
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+
+#endif /* !CONFIG_SMP */
+
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+#define local_irq_save(flags) __save_and_cli(flags)
+#define local_irq_restore(flags) __restore_flags(flags)
+
+/*
+ * Return non-zero if the version field of this cpu's PVR matches the
+ * given processor version value pv.
+ */
+static __inline__ int __is_processor(unsigned long pv)
+{
+ unsigned long pvr;
+ asm volatile("mfspr %0, 0x11F" : "=r" (pvr)); /* SPR 0x11F (287) = PVR */
+ return(PVR_VER(pvr) == pv);
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ *
+ * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
+ * is more like most of the other architectures.
+ */
+/*
+ * 32-bit atomic exchange: store val into *m and return the previous
+ * contents, via a lwarx/stwcx. reservation loop that retries until
+ * the store succeeds.  EIEIO_ON_SMP / ISYNC_ON_SMP supply the SMP
+ * memory barriers around the operation (empty on UP builds).
+ * NOTE(review): the "2:" label is never referenced.
+ */
+static __inline__ unsigned long
+__xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%3 # __xchg_u32\n\
+ stwcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+/*
+ * 64-bit atomic exchange: store val into *m and return the previous
+ * contents, via a ldarx/stdcx. reservation loop.  Barrier usage is
+ * the same as in __xchg_u32.
+ * NOTE(review): the "2:" label is never referenced.
+ */
+static __inline__ unsigned long
+__xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # __xchg_u64\n\
+ stdcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+/*
+ * Size dispatcher for xchg(): pick the 32- or 64-bit exchange
+ * primitive.  Any other operand size falls through to the
+ * deliberately undefined __xchg_called_with_bad_pointer(), turning
+ * a bad xchg() into a link-time error.
+ */
+static __inline__ unsigned long
+__xchg(volatile void *ptr, unsigned long x, int size)
+{
+ if (size == 4)
+ return __xchg_u32(ptr, x);
+ if (size == 8)
+ return __xchg_u64(ptr, x);
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#define xchg(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+#define tas(ptr) (xchg((ptr),1))
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * 32-bit compare-and-exchange: if *p == old, store new; return the
+ * value found at *p.  Implemented with a lwarx/stwcx. reservation
+ * loop; EIEIO_ON_SMP / ISYNC_ON_SMP supply the SMP barriers.
+ *
+ * prev is unsigned int (was int) so a value with the top bit set is
+ * zero-extended, not sign-extended, when widened to the unsigned
+ * long return value.
+ */
+static __inline__ unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n\
+ stwcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/*
+ * 64-bit compare-and-exchange: if *p == old, store new; return the
+ * value found at *p.  Implemented with a ldarx/stdcx. reservation
+ * loop; EIEIO_ON_SMP / ISYNC_ON_SMP supply the SMP barriers.
+ *
+ * prev must be a full 64-bit type: declaring it int (as before)
+ * truncated the ldarx result to 32 bits, so the returned "previous
+ * value" was wrong for any value not representable in 32 bits and
+ * callers' prev == old success check misfired.
+ */
+static __inline__ unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/*
+ * Size dispatcher for cmpxchg(): select the 32- or 64-bit
+ * compare-and-exchange primitive.  Any other operand size resolves
+ * to the undefined __cmpxchg_called_with_bad_pointer() and fails at
+ * link time.
+ */
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#endif
--- /dev/null
+#ifndef _PPC64_TERMBITS_H
+#define _PPC64_TERMBITS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+/*
+ * termios type and macro definitions. Be careful about adding stuff
+ * to this file since it's used in GNU libc and there are strict rules
+ * concerning namespace pollution.
+ */
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VMIN 5
+#define VEOL 6
+#define VTIME 7
+#define VEOL2 8
+#define VSWTC 9
+#define VWERASE 10
+#define VREPRINT 11
+#define VSUSP 12
+#define VSTART 13
+#define VSTOP 14
+#define VLNEXT 15
+#define VDISCARD 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IXON 0001000
+#define IXOFF 0002000
+#define IXANY 0004000
+#define IUCLC 0010000
+#define IMAXBEL 0020000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define ONLCR 0000002
+#define OLCUC 0000004
+
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+
+#define OFILL 00000100
+#define OFDEL 00000200
+#define NLDLY 00001400
+#define NL0 00000000
+#define NL1 00000400
+#define NL2 00001000
+#define NL3 00001400
+#define TABDLY 00006000
+#define TAB0 00000000
+#define TAB1 00002000
+#define TAB2 00004000
+#define TAB3 00006000
+#define CRDLY 00030000
+#define CR0 00000000
+#define CR1 00010000
+#define CR2 00020000
+#define CR3 00030000
+#define FFDLY 00040000
+#define FF0 00000000
+#define FF1 00040000
+#define BSDLY 00100000
+#define BS0 00000000
+#define BS1 00100000
+#define VTDLY 00200000
+#define VT0 00000000
+#define VT1 00200000
+#define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */
+
+/* c_cflag bit meaning */
+#define CBAUD 0000377
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CBAUDEX 0000000
+#define B57600 00020
+#define B115200 00021
+#define B230400 00022
+#define B460800 00023
+#define B500000 00024
+#define B576000 00025
+#define B921600 00026
+#define B1000000 00027
+#define B1152000 00030
+#define B1500000 00031
+#define B2000000 00032
+#define B2500000 00033
+#define B3000000 00034
+#define B3500000 00035
+#define B4000000 00036
+
+#define CSIZE 00001400
+#define CS5 00000000
+#define CS6 00000400
+#define CS7 00001000
+#define CS8 00001400
+
+#define CSTOPB 00002000
+#define CREAD 00004000
+#define PARENB 00010000
+#define PARODD 00020000
+#define HUPCL 00040000
+
+#define CLOCAL 00100000
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0x00000080
+#define ICANON 0x00000100
+#define XCASE 0x00004000
+#define ECHO 0x00000008
+#define ECHOE 0x00000002
+#define ECHOK 0x00000004
+#define ECHONL 0x00000010
+#define NOFLSH 0x80000000
+#define TOSTOP 0x00400000
+#define ECHOCTL 0x00000040
+#define ECHOPRT 0x00000020
+#define ECHOKE 0x00000001
+#define FLUSHO 0x00800000
+#define PENDIN 0x20000000
+#define IEXTEN 0x00000400
+
+/* Values for the ACTION argument to `tcflow'. */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _PPC64_TERMBITS_H */
--- /dev/null
+#ifndef _PPC64_TERMIOS_H
+#define _PPC64_TERMIOS_H
+
+/*
+ * Liberally adapted from alpha/termios.h. In particular, the c_cc[]
+ * fields have been reordered so that termio & termios share the
+ * common subset in the same order (for brain dead programs that don't
+ * know or care about the differences).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ioctls.h>
+#include <asm/termbits.h>
+
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ short sg_flags;
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc;
+ char t_dsuspc;
+ char t_rprntc;
+ char t_flushc;
+ char t_werasc;
+ char t_lnextc;
+};
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 10
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* c_cc characters */
+#define _VINTR 0
+#define _VQUIT 1
+#define _VERASE 2
+#define _VKILL 3
+#define _VEOF 4
+#define _VMIN 5
+#define _VEOL 6
+#define _VTIME 7
+#define _VEOL2 8
+#define _VSWTC 9
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+#define N_X25 6 /* X.25 async */
+#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964 9 /* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IrDa - http://www.cs.uit.no/~dagb/irda/irda.html */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC 13 /* synchronous HDLC */
+#define N_SYNC_PPP 14
+
+#ifdef __KERNEL__
+/* ^C ^\ del ^U ^D 1 0 0 0 0 ^W ^R ^Z ^Q ^S ^V ^U */
+#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
+#endif
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+
+#define TIOCGETP _IOR('t', 8, struct sgttyb)
+#define TIOCSETP _IOW('t', 9, struct sgttyb)
+#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
+
+#define TIOCSETC _IOW('t', 17, struct tchars)
+#define TIOCGETC _IOR('t', 18, struct tchars)
+#define TCGETS _IOR('t', 19, struct termios)
+#define TCSETS _IOW('t', 20, struct termios)
+#define TCSETSW _IOW('t', 21, struct termios)
+#define TCSETSF _IOW('t', 22, struct termios)
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCGLTC _IOR('t', 116, struct ltchars)
+#define TIOCSLTC _IOW('t', 117, struct ltchars)
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+
+#define TIOCSTI 0x5412
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#ifdef __KERNEL__
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+/* Replace the low 16 bits of (termios)->x with the 16-bit value read
+ * from the user-space (termio)->x, preserving the high 16 bits.
+ * NOTE(review): the get_user() return value is ignored, so on a fault
+ * __tmp is used uninitialized -- confirm callers validate the buffer
+ * with access_ok() first. */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
+}
+
+/* Copy a user-space struct termio into a kernel struct termios:
+ * merge the four 16-bit flag words and copy the NCC control chars.
+ * NOTE(review): the copy_from_user() result is discarded. */
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __KERNEL__ */
+
+#endif /* _PPC64_TERMIOS_H */
--- /dev/null
+/* thread_info.h: PPC low-level thread information
+ * adapted from the i386 version by Paul Mackerras
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <linux/stringify.h>
+
+/*
+ * low level task data.
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ int cpu; /* cpu we're on */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ task: &tsk, \
+ exec_domain: &default_exec_domain, \
+ flags: 0, \
+ cpu: 0, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* thread information allocation */
+
+#define THREAD_ORDER 2
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
+
+#define alloc_thread_info() ((struct thread_info *) \
+ __get_free_pages(GFP_KERNEL, THREAD_ORDER))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#if THREAD_SIZE != (4*PAGE_SIZE)
+#error update vmlinux.lds and current_thread_info to match
+#endif
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+
+ /* The kernel stack is THREAD_SIZE aligned with the thread_info
+  * at its base, so clearing the low THREAD_SHIFT bits of the
+  * stack pointer (r1) yields the thread_info address.  Passing
+  * THREAD_SHIFT as an immediate (instead of hard-coding 14)
+  * keeps the asm in sync with THREAD_ORDER/PAGE_SHIFT.
+  */
+ __asm__("clrrdi %0,1,%1" : "=r"(ti) : "i"(THREAD_SHIFT));
+ return ti;
+}
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * thread information flag bit numbers
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling
+ TIF_NEED_RESCHED */
+#define TIF_32BIT 5 /* 32 bit binary */
+#define TIF_RUN_LIGHT 6 /* iSeries run light */
+
+/* as above, but as bit values */
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_32BIT (1<<TIF_32BIT)
+#define _TIF_RUN_LIGHT (1<<TIF_RUN_LIGHT)
+
+#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
+ _TIF_NEED_RESCHED)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
--- /dev/null
+/*
+ *
+ * Common time prototypes and such for all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __PPC64_TIME_H
+#define __PPC64_TIME_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/mc146818rtc.h>
+
+#include <asm/processor.h>
+#include <asm/Paca.h>
+#include <asm/iSeries/HvCall.h>
+
+/* time.c */
+extern unsigned long tb_ticks_per_jiffy;
+extern unsigned long tb_ticks_per_usec;
+extern unsigned long tb_ticks_per_sec;
+extern unsigned long tb_to_xs;
+extern unsigned tb_to_us;
+extern unsigned long tb_last_stamp;
+
+struct rtc_time;
+extern void to_tm(int tim, struct rtc_time * tm);
+extern time_t last_rtc_update;
+
+/*
+ * By putting all of this stuff into a single struct we
+ * reduce the number of cache lines touched by do_gettimeofday.
+ * Both by collecting all of the data in one cache line and
+ * by touching only one TOC entry
+ */
+struct gettimeofday_vars {
+ unsigned long tb_to_xs;
+ unsigned long stamp_xsec;
+};
+
+struct gettimeofday_struct {
+ unsigned long tb_orig_stamp;
+ unsigned long tb_ticks_per_sec;
+ struct gettimeofday_vars vars[2];
+ struct gettimeofday_vars * volatile varp;
+ unsigned tb_to_us;
+};
+
+struct div_result {
+ unsigned long result_high;
+ unsigned long result_low;
+};
+
+int via_calibrate_decr(void);
+
+static __inline__ unsigned long get_tb(void)
+{
+ return mftb();
+}
+
+/* Accessor functions for the decrementer register. */
+/* Read the current value of the DEC special-purpose register. */
+static __inline__ unsigned int get_dec(void)
+{
+ return (mfspr(SPRN_DEC));
+}
+
+/*
+ * Arm the decrementer to fire in val timebase ticks.  On a shared
+ * processor LPAR (paca->xLpPaca.xSharedProc set) the requested value
+ * is recorded in the lpar paca and, if the hardware decrementer would
+ * fire later than the new value, HvCall_setVirtualDecr() is invoked
+ * (presumably asking the hypervisor to honour the earlier deadline --
+ * confirm against the HvCall documentation).  Otherwise the real DEC
+ * register is written directly.
+ */
+static __inline__ void set_dec(int val)
+{
+ struct Paca * paca;
+ int cur_dec;
+
+ paca = (struct Paca *)mfspr(SPRG3); /* SPRG3 holds this cpu's paca pointer */
+ if ( paca->xLpPaca.xSharedProc ) {
+ paca->xLpPaca.xVirtualDecr = val;
+ cur_dec = get_dec();
+ if ( cur_dec > val )
+ HvCall_setVirtualDecr();
+ }
+ else
+ mtspr(SPRN_DEC, val);
+}
+
+/* Number of timebase ticks elapsed since tstamp.
+ * NOTE(review): "extern __inline__" has pre-C99 GCC inline semantics
+ * (no out-of-line definition is emitted) -- gcc-specific. */
+extern __inline__ unsigned long tb_ticks_since(unsigned long tstamp) {
+ return get_tb() - tstamp;
+}
+
+#define mulhwu(x,y) \
+({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+#define mulhdu(x,y) \
+({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+
+
+unsigned mulhwu_scale_factor(unsigned, unsigned);
+void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
+ unsigned divisor, struct div_result *dr );
+#endif /* __KERNEL__ */
+#endif /* __PPC64_TIME_H */
--- /dev/null
+/*
+ * linux/include/asm-ppc/timex.h
+ *
+ * PPC64 architecture timex specifications
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASMPPC64_TIMEX_H
+#define _ASMPPC64_TIMEX_H
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+ (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+ << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+typedef unsigned long cycles_t;
+extern cycles_t cacheflush_time;
+
+/* Read the free-running timebase register as the cycle counter. */
+static inline cycles_t get_cycles(void)
+{
+ cycles_t tb;
+
+ __asm__ __volatile__("mftb %0" : "=r" (tb));
+ return tb;
+}
+
+#endif
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#ifndef _PPC64_TYPES_H
+#define _PPC64_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid a name clashes.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+typedef unsigned int umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+
+typedef struct {
+ __u32 u[4];
+} __attribute((aligned(16))) __vector128;
+
+#ifdef __KERNEL__
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long s64;
+typedef unsigned long u64;
+
+typedef __vector128 vector128;
+
+#define BITS_PER_LONG 64
+
+typedef u32 dma_addr_t;
+typedef u64 dma64_addr_t;
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PPC64_TYPES_H */
--- /dev/null
+#ifndef _PPC64_UACCESS_H
+#define _PPC64_UACCESS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define KERNEL_DS ((mm_segment_t) { 0 })
+#define USER_DS ((mm_segment_t) { 1 })
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.fs)
+#define set_fs(val) (current->thread.fs = (val))
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
+#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+
+/* Legacy wrapper around access_ok(): returns 0 when the user buffer
+ * [addr, addr+size) passes the access check, -EFAULT otherwise. */
+extern inline int verify_area(int type, const void * addr, unsigned long size)
+{
+ return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
+
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+extern void sort_exception_table(void);
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the uglyness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+/*
+ * get_user()/put_user() validate the user pointer with access_ok()
+ * first; the __get_user()/__put_user() variants skip the check (the
+ * caller must have verified the range already).  All evaluate to 0 on
+ * success or -EFAULT on a fault; the access width is taken from the
+ * pointed-to type.
+ */
+#define get_user(x,ptr) \
+ __get_user_check((x),(ptr),sizeof(*(ptr)))
+#define put_user(x,ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __get_user(x,ptr) \
+ __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x,ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+/* Deliberately never defined: referencing it turns an unsupported
+ * access size into a link-time error. */
+extern long __put_user_bad(void);
+
+/* Store without an access_ok() check; expands to 0 or -EFAULT. */
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x),(ptr),(size),__pu_err); \
+ __pu_err; \
+})
+
+/* Checked store: __pu_err stays -EFAULT unless access_ok() passes.
+ * ptr is evaluated once into __pu_addr to avoid double side effects. */
+#define __put_user_check(x,ptr,size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
+ __put_user_size((x),__pu_addr,(size),__pu_err); \
+ __pu_err; \
+})
+
+/* Dispatch on the store width (stb/sth/stw/std); any other size
+ * references the undefined __put_user_bad() and fails at link time. */
+#define __put_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x,ptr,retval,"stb"); break; \
+ case 2: __put_user_asm(x,ptr,retval,"sth"); break; \
+ case 4: __put_user_asm(x,ptr,retval,"stw"); break; \
+ case 8: __put_user_asm(x,ptr,retval,"std"); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+/* Dummy type used to tell gcc an access covers a large region. */
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+/*
+ * Label 1 is the store that may fault.  The __ex_table entry (.llong
+ * pairs, 8-byte aligned) maps it to the fixup at label 3, which loads
+ * -EFAULT into err and branches back to label 2 to continue.
+ */
+#define __put_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2)\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .llong 1b,3b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "r"(x), "b"(addr), "i"(-EFAULT), "0"(err))
+
+
+/* Load without an access_ok() check; expands to 0 or -EFAULT and
+ * assigns the (possibly zeroed-on-fault) value to x. */
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val,(ptr),(size),__gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+/* Checked load: __gu_val is pre-zeroed so x never receives stale data
+ * when access_ok() rejects the range. */
+#define __get_user_check(x,ptr,size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ,__gu_addr,size)) \
+ __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+/* Never defined: link-time trap for unsupported access sizes. */
+extern long __get_user_bad(void);
+
+/* Dispatch on the load width (lbz/lhz/lwz/ld). */
+#define __get_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x,ptr,retval,"lbz"); break; \
+ case 2: __get_user_asm(x,ptr,retval,"lhz"); break; \
+ case 4: __get_user_asm(x,ptr,retval,"lwz"); break; \
+ case 8: __get_user_asm(x,ptr,retval,"ld"); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+/*
+ * Same fixup scheme as __put_user_asm, with one addition: on a fault
+ * the fixup zeroes the destination register (li %1,0) before setting
+ * err to -EFAULT, so callers never observe uninitialised data.
+ */
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2)\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " li %1,0\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .llong 1b,3b\n" \
+ ".previous" \
+ : "=r"(err), "=r"(x) \
+ : "b"(addr), "i"(-EFAULT), "0"(err))
+
+/* more complex routines */
+
+/* Low-level copy loop; returns the number of bytes NOT copied
+ * (0 on complete success). */
+extern unsigned long __copy_tofrom_user(void *to, const void *from, unsigned long size);
+
+/*
+ * Copy n bytes from user 'from' to kernel 'to'.  Returns the number of
+ * bytes left uncopied.  If the source range straddles the top of the
+ * user address space, only the part below TASK_SIZE is attempted and
+ * the overrun ('over') is added to the residue.
+ */
+extern inline unsigned long
+copy_from_user(void *to, const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user(to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+ over = (unsigned long)from + n - TASK_SIZE;
+ return __copy_tofrom_user(to, from, n - over) + over;
+ }
+ return n;
+}
+
+/*
+ * Mirror image of copy_from_user(): kernel 'from' to user 'to', with
+ * the same partial-range handling on the destination.
+ */
+extern inline unsigned long
+copy_to_user(void *to, const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, from, n);
+ if ((unsigned long)to < TASK_SIZE) {
+ over = (unsigned long)to + n - TASK_SIZE;
+ return __copy_tofrom_user(to, from, n - over) + over;
+ }
+ return n;
+}
+
+/* Unchecked variants: the caller must have done access_ok() already. */
+#define __copy_from_user(to, from, size) \
+ __copy_tofrom_user((to), (from), (size))
+#define __copy_to_user(to, from, size) \
+ __copy_tofrom_user((to), (from), (size))
+
+/* Zero 'size' bytes of user memory; returns bytes NOT cleared. */
+extern unsigned long __clear_user(void *addr, unsigned long size);
+
+/*
+ * Checked zeroing.  On a rejected range this returns -EFAULT (cast to
+ * unsigned long) for a non-zero size, or 0 when there was nothing to
+ * clear anyway.
+ */
+extern inline unsigned long
+clear_user(void *addr, unsigned long size)
+{
+ if (access_ok(VERIFY_WRITE, addr, size))
+ return __clear_user(addr, size);
+ return size? -EFAULT: 0;
+}
+
+extern int __strncpy_from_user(char *dst, const char *src, long count);
+
+/*
+ * Copy a NUL-terminated string from user space.  Only the first byte
+ * of src is range-checked here; the low-level routine faults (and is
+ * fixed up) if the string runs into an unmapped page.
+ */
+extern inline long
+strncpy_from_user(char *dst, const char *src, long count)
+{
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+
+extern int __strnlen_user(const char *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+extern __inline__ int strnlen_user(const char *str, long len)
+{
+ /* With KERNEL_DS in force the whole address space is fair game;
+ * otherwise cap the scan at the last user-space byte. */
+ unsigned long top = __kernel_ok? ~0UL: TASK_SIZE - 1;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len, top);
+}
+
+/* Unbounded-ish strlen: 0x7ffffffe keeps the limit a positive long. */
+#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PPC64_UACCESS_H */
--- /dev/null
+#ifndef _ASMPPC64_UCONTEXT_H
+#define _ASMPPC64_UCONTEXT_H
+
+/* Copied from i386.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* User-level context, as delivered to SA_SIGINFO signal handlers. */
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link; /* context to resume when this one returns */
+ stack_t uc_stack; /* stack in use by this context */
+ struct sigcontext_struct uc_mcontext; /* saved machine state */
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _ASMPPC64_UCONTEXT_H */
--- /dev/null
+#ifndef __UDBG_HDR
+#define __UDBG_HDR
+
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Initialise the debug UART whose registers live at 'comport'. */
+void udbg_init_uart(void *comport);
+/* Character and string I/O primitives on the debug console. */
+void udbg_putc(unsigned char c);
+unsigned char udbg_getc(void);
+int udbg_getc_poll(void); /* NOTE(review): presumably non-blocking -- verify */
+void udbg_puts(const char *s);
+int udbg_write(const char *s, int n);
+int udbg_read(char *buf, int buflen);
+/* Formatted output helpers. */
+void udbg_puthex(unsigned long val);
+void udbg_printSP(const char *s);
+void udbg_printf(const char *fmt, ...);
+/* Debug-flag-gated printf and flag query. */
+void udbg_ppcdbg(unsigned long flags, const char *fmt, ...);
+unsigned long udbg_ifdebug(unsigned long flags);
+
+#endif
--- /dev/null
+#ifndef __PPC64_UNALIGNED_H
+#define __PPC64_UNALIGNED_H
+
+/*
+ * The PowerPC can do unaligned accesses itself in big endian mode.
+ *
+ * The strange macros are there to make sure these can't
+ * be misused in a way that makes them not work on other
+ * architectures where unaligned accesses aren't as simple.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Plain dereferences suffice: per the header comment above, the CPU
+ * handles unaligned big-endian accesses itself.  The (void) cast keeps
+ * put_unaligned() from being usable as a value. */
+#define get_unaligned(ptr) (*(ptr))
+
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+#endif /* __PPC64_UNALIGNED_H */
--- /dev/null
+#ifndef _ASM_PPC_UNISTD_H_
+#define _ASM_PPC_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_query_module 166
+#define __NR_poll 167
+#define __NR_nfsservctl 168
+#define __NR_setresgid 169
+#define __NR_getresgid 170
+#define __NR_prctl 171
+#define __NR_rt_sigreturn 172
+#define __NR_rt_sigaction 173
+#define __NR_rt_sigprocmask 174
+#define __NR_rt_sigpending 175
+#define __NR_rt_sigtimedwait 176
+#define __NR_rt_sigqueueinfo 177
+#define __NR_rt_sigsuspend 178
+#define __NR_pread 179
+#define __NR_pwrite 180
+#define __NR_chown 181
+#define __NR_getcwd 182
+#define __NR_capget 183
+#define __NR_capset 184
+#define __NR_sigaltstack 185
+#define __NR_sendfile 186
+#define __NR_getpmsg 187 /* some people actually want streams */
+#define __NR_putpmsg 188 /* some people actually want streams */
+#define __NR_vfork 189
+#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_pciconfig_read 198
+#define __NR_pciconfig_write 199
+#define __NR_pciconfig_iobase 200
+#define __NR_multiplexer 201
+#define __NR_getdents64 202
+#define __NR_pivot_root 203
+#define __NR_fcntl64 204
+#define __NR_madvise 205
+#define __NR_mincore 206
+#define __NR_gettid 207
+#define __NR_tkill 208
+
+#if 0 /* Reminder for paulus: these syscalls still need numbers allocated in the ppc32 unistd.h before being enabled here */
+__NR_security
+__NR_readahead
+__NR_setxattr
+__NR_lsetxattr
+__NR_fsetxattr
+__NR_getxattr
+__NR_lgetxattr
+__NR_fgetxattr
+__NR_listxattr
+__NR_llistxattr
+__NR_flistxattr
+__NR_removexattr
+__NR_lremovexattr
+__NR_fremovexattr
+#endif
+
+#define __NR(n) #n
+
+
+/*
+ * The kernel flags a failed syscall by setting the summary-overflow
+ * bit of CR0; in the mfcr image read by the stubs below that bit is
+ * mask 0x10000000.  On error the (positive) error code in __sc_ret is
+ * stored into errno and -1 is returned instead.
+ */
+#define __syscall_return(type) \
+ return (__sc_err & 0x10000000 ? errno = __sc_ret, __sc_ret = -1 : 0), \
+ (type) __sc_ret
+
+/* Volatile registers not preserved across 'sc' (r0/r3 are handled
+ * explicitly as operands of the asm). */
+#define __syscall_clobbers \
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12"
+
+/*
+ * _syscallN() macros: expand to an inline syscall stub.  The syscall
+ * number goes in r0, arguments (if any) in r3 upward; the result comes
+ * back in r3 and the CR image (for the error bit) is read with mfcr
+ * into r0, then decoded by __syscall_return().
+ */
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+/* One-argument form: arg1 in r3 (which also carries the result). */
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+/* Two-argument form: args in r3, r4. */
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+/* Three-argument form: args in r3..r5. */
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+/* Four-argument form: args in r3..r6. */
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+/* Five-argument form: args in r3..r7. */
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
+#ifdef __KERNEL_SYSCALLS__
+
+/*
+ * Forking from kernel space will result in the child getting a new,
+ * empty kernel stack area. Thus the child cannot access automatic
+ * variables set in the parent unless they are in registers, and the
+ * procedure where the fork was done cannot return to its caller in
+ * the child.
+ */
+
+/*
+ * System call prototypes.
+ */
+/* _exit() shares the exit syscall number. */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,pause)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+static inline _syscall1(int,delete_module,const char *,name)
+
+/* Wait for any child: waitpid(-1, ...) with no options. */
+static inline pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+
+#endif /* __KERNEL_SYSCALLS__ */
+
+#endif /* _ASM_PPC_UNISTD_H_ */
--- /dev/null
+#ifndef _PPC_USER_H
+#define _PPC_USER_H
+
+/* Adapted from <asm-alpha/user.h>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd, NOT the osf-core). The file contents
+ * are as follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused, we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ */
+/* The upage layout described above; written at the start of core files
+ * for gdb's trad-core reader. */
+struct user {
+ struct pt_regs regs; /* entire machine state */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ struct regs * u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+/* NOTE(review): 'struct regs' above is not declared in this header
+ * (only struct pt_regs is); it is carried over from the ppc32/alpha
+ * headers -- confirm it is intentional before changing it. */
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _PPC_USER_H */
--- /dev/null
+#ifndef _LINUX_VC_IOCTL_H
+#define _LINUX_VC_IOCTL_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Video mode description exchanged with user space via the VC_*MODE
+ * ioctls below. */
+struct vc_mode {
+ int height;
+ int width;
+ int depth;
+ int pitch;
+ int mode;
+ char name[32];
+ unsigned long fb_address;
+ unsigned long cmap_adr_address;
+ unsigned long cmap_data_address;
+ unsigned long disp_reg_address;
+};
+
+/* ioctl numbers ('vg'..-ish range, 0x7667 onward). */
+#define VC_GETMODE 0x7667
+#define VC_SETMODE 0x7668
+#define VC_INQMODE 0x7669
+
+#define VC_SETCMAP 0x766a
+#define VC_GETCMAP 0x766b
+
+#define VC_POWERMODE 0x766c
+
+/* Values for the argument to the VC_POWERMODE ioctl */
+#define VC_POWERMODE_INQUIRY (-1)
+#define VESA_NO_BLANKING 0
+#define VESA_VSYNC_SUSPEND 1
+#define VESA_HSYNC_SUSPEND 2
+#define VESA_POWERDOWN 3
+
+#ifdef __KERNEL__
+/* Console-side implementations backing the ioctls above. */
+extern int console_getmode(struct vc_mode *);
+extern int console_setmode(struct vc_mode *, int);
+extern int console_setcmap(int, unsigned char *, unsigned char *,
+ unsigned char *);
+extern int console_powermode(int);
+extern struct vc_mode display_info;
+extern struct fb_info *console_fb_info;
+#endif
+
+#endif /* _LINUX_VC_IOCTL_H */
--- /dev/null
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+#include <asm/io.h>
+
+#include <linux/config.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+
+#define VT_BUF_HAVE_RW
+/*
+ * These are only needed for supporting VGA or MDA text mode, which use little
+ * endian byte ordering.
+ * In other cases, we can optimize by using native byte ordering and
+ * <linux/vt_buffer.h> has already done the right job for us.
+ */
+
+/* Store a character/attribute word into little-endian VGA/MDA video
+ * RAM via the byte-swapping le16 helpers. */
+extern inline void scr_writew(u16 val, volatile u16 *addr)
+{
+ st_le16(addr, val);
+}
+
+extern inline u16 scr_readw(volatile const u16 *addr)
+{
+ return ld_le16(addr);
+}
+
+/* Word-for-word copies need no swapping: both ends share the same
+ * (little-endian) layout, so plain memcpy is fine. */
+#define VT_BUF_HAVE_MEMCPYW
+#define scr_memcpyw memcpy
+
+#endif /* CONFIG_VGA_CONSOLE || CONFIG_MDA_CONSOLE */
+
+extern unsigned long vgacon_remap_base;
+/* Map a physical VGA aperture address for CPU access.
+ * NOTE(review): ioremap() is called with size 0 here -- verify the
+ * ppc64 ioremap implementation tolerates a zero length. */
+#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
+
+/* Byte access to already-mapped VGA memory: plain dereferences. */
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
--- /dev/null
+#include <asm-generic/xor.h>