Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Mon, 7 May 2007 19:34:57 +0000 (12:34 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Mon, 7 May 2007 19:34:57 +0000 (12:34 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] update memory attribute aliasing documentation & test cases
  [IA64] fail mmaps that span areas with incompatible attributes
  [IA64] allow WB /sys/.../legacy_mem mmaps
  [IA64] make ioremap avoid unsupported attributes
  [IA64] rename ioremap variables to match i386
  [IA64] relax per-cpu TLB requirement to DTC
  [IA64] remove per-cpu ia64_phys_stacked_size_p8
  [IA64] Fix example error injection program
  [IA64] Itanium MC Error Injection Tool: pal_mc_error_inject() interface
  [IA64] Itanium MC Error Injection Tool: Makefile changes
  [IA64] Itanium MC Error Injection Tool: Driver sysfs interface
  [IA64] Itanium MC Error Injection Tool: Doc and sample application
  [IA64] Itanium MC Error Injection Tool: Kernel configuration

24 files changed:
Documentation/ia64/aliasing-test.c [new file with mode: 0644]
Documentation/ia64/aliasing.txt
Documentation/ia64/err_inject.txt [new file with mode: 0644]
arch/ia64/Kconfig
arch/ia64/defconfig
arch/ia64/kernel/Makefile
arch/ia64/kernel/efi.c
arch/ia64/kernel/entry.S
arch/ia64/kernel/err_inject.c [new file with mode: 0644]
arch/ia64/kernel/ivt.S
arch/ia64/kernel/mca_asm.S
arch/ia64/kernel/patch.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/vmlinux.lds.S
arch/ia64/mm/init.c
arch/ia64/mm/ioremap.c
arch/ia64/pci/pci.c
include/asm-ia64/asmmacro.h
include/asm-ia64/io.h
include/asm-ia64/kregs.h
include/asm-ia64/pal.h
include/asm-ia64/patch.h
include/asm-ia64/processor.h
include/asm-ia64/sections.h

diff --git a/Documentation/ia64/aliasing-test.c b/Documentation/ia64/aliasing-test.c
new file mode 100644 (file)
index 0000000..3153167
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Exercise /dev/mem mmap cases that have been troublesome in the past
+ *
+ * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
+ *     Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <fnmatch.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+int sum;
+
+int map_mem(char *path, off_t offset, size_t length, int touch)
+{
+       int fd, rc;
+       void *addr;
+       int *c;
+
+       fd = open(path, O_RDWR);
+       if (fd == -1) {
+               perror(path);
+               return -1;
+       }
+
+       addr = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
+       if (addr == MAP_FAILED)
+               return 1;
+
+       if (touch) {
+               c = (int *) addr;
+               while (c < (int *) (addr + length))
+                       sum += *c++;
+       }
+
+       rc = munmap(addr, length);
+       if (rc == -1) {
+               perror("munmap");
+               return -1;
+       }
+
+       close(fd);
+       return 0;
+}
+
+int scan_sysfs(char *path, char *file, off_t offset, size_t length, int touch)
+{
+       struct dirent **namelist;
+       char *name, *path2;
+       int i, n, r, rc, result = 0;
+       struct stat buf;
+
+       n = scandir(path, &namelist, 0, alphasort);
+       if (n < 0) {
+               perror("scandir");
+               return -1;
+       }
+
+       for (i = 0; i < n; i++) {
+               name = namelist[i]->d_name;
+
+               if (fnmatch(".", name, 0) == 0)
+                       goto skip;
+               if (fnmatch("..", name, 0) == 0)
+                       goto skip;
+
+               path2 = malloc(strlen(path) + strlen(name) + 3);
+               strcpy(path2, path);
+               strcat(path2, "/");
+               strcat(path2, name);
+
+               if (fnmatch(file, name, 0) == 0) {
+                       rc = map_mem(path2, offset, length, touch);
+                       if (rc == 0)
+                               fprintf(stderr, "PASS: %s 0x%lx-0x%lx is %s\n", path2, offset, offset + length, touch ? "readable" : "mappable");
+                       else if (rc > 0)
+                               fprintf(stderr, "PASS: %s 0x%lx-0x%lx not mappable\n", path2, offset, offset + length);
+                       else {
+                               fprintf(stderr, "FAIL: %s 0x%lx-0x%lx not accessible\n", path2, offset, offset + length);
+                               return rc;
+                       }
+               } else {
+                       r = lstat(path2, &buf);
+                       if (r == 0 && S_ISDIR(buf.st_mode)) {
+                               rc = scan_sysfs(path2, file, offset, length, touch);
+                               if (rc < 0)
+                                       return rc;
+                       }
+               }
+
+               result |= rc;
+               free(path2);
+
+skip:
+               free(namelist[i]);
+       }
+       free(namelist);
+       return result;
+}
+
+char buf[1024];
+
+int read_rom(char *path)
+{
+       int fd, rc;
+       size_t size = 0;
+
+       fd = open(path, O_RDWR);
+       if (fd == -1) {
+               perror(path);
+               return -1;
+       }
+
+       rc = write(fd, "1", 2);
+       if (rc <= 0) {
+               perror("write");
+               return -1;
+       }
+
+       do {
+               rc = read(fd, buf, sizeof(buf));
+               if (rc > 0)
+                       size += rc;
+       } while (rc > 0);
+
+       close(fd);
+       return size;
+}
+
+int scan_rom(char *path, char *file)
+{
+       struct dirent **namelist;
+       char *name, *path2;
+       int i, n, r, rc, result = 0;
+       struct stat buf;
+
+       n = scandir(path, &namelist, 0, alphasort);
+       if (n < 0) {
+               perror("scandir");
+               return -1;
+       }
+
+       for (i = 0; i < n; i++) {
+               name = namelist[i]->d_name;
+
+               if (fnmatch(".", name, 0) == 0)
+                       goto skip;
+               if (fnmatch("..", name, 0) == 0)
+                       goto skip;
+
+               path2 = malloc(strlen(path) + strlen(name) + 3);
+               strcpy(path2, path);
+               strcat(path2, "/");
+               strcat(path2, name);
+
+               if (fnmatch(file, name, 0) == 0) {
+                       rc = read_rom(path2);
+
+                       /*
+                        * It's OK if the ROM is unreadable.  Maybe there
+                        * is no ROM, or some other error occurred.  The
+                        * important thing is that no MCA happened.
+                        */
+                       if (rc > 0)
+                               fprintf(stderr, "PASS: %s read %d bytes\n", path2, rc);
+                       else {
+                               fprintf(stderr, "PASS: %s not readable\n", path2);
+                               return rc;
+                       }
+               } else {
+                       r = lstat(path2, &buf);
+                       if (r == 0 && S_ISDIR(buf.st_mode)) {
+                               rc = scan_rom(path2, file);
+                               if (rc < 0)
+                                       return rc;
+                       }
+               }
+
+               result |= rc;
+               free(path2);
+
+skip:
+               free(namelist[i]);
+       }
+       free(namelist);
+       return result;
+}
+
+int main(void)
+{
+       int rc;
+
+       if (map_mem("/dev/mem", 0, 0xA0000, 1) == 0)
+               fprintf(stderr, "PASS: /dev/mem 0x0-0xa0000 is readable\n");
+       else
+               fprintf(stderr, "FAIL: /dev/mem 0x0-0xa0000 not accessible\n");
+
+       /*
+        * It's not safe to blindly read the VGA frame buffer.  If you know
+        * how to poke the card the right way, it should respond, but it's
+        * not safe in general.  Many machines, e.g., Intel chipsets, cover
+        * up a non-responding card by just returning -1, but others will
+        * report the failure as a machine check.
+        */
+       if (map_mem("/dev/mem", 0xA0000, 0x20000, 0) == 0)
+               fprintf(stderr, "PASS: /dev/mem 0xa0000-0xc0000 is mappable\n");
+       else
+               fprintf(stderr, "FAIL: /dev/mem 0xa0000-0xc0000 not accessible\n");
+
+       if (map_mem("/dev/mem", 0xC0000, 0x40000, 1) == 0)
+               fprintf(stderr, "PASS: /dev/mem 0xc0000-0x100000 is readable\n");
+       else
+               fprintf(stderr, "FAIL: /dev/mem 0xc0000-0x100000 not accessible\n");
+
+       /*
+        * Often you can map all the individual pieces above (0-0xA0000,
+        * 0xA0000-0xC0000, and 0xC0000-0x100000), but can't map the whole
+        * thing at once.  This is because the individual pieces use different
+        * attributes, and there's no single attribute supported over the
+        * whole region.
+        */
+       rc = map_mem("/dev/mem", 0, 1024*1024, 0);
+       if (rc == 0)
+               fprintf(stderr, "PASS: /dev/mem 0x0-0x100000 is mappable\n");
+       else if (rc > 0)
+               fprintf(stderr, "PASS: /dev/mem 0x0-0x100000 not mappable\n");
+       else
+               fprintf(stderr, "FAIL: /dev/mem 0x0-0x100000 not accessible\n");
+
+       scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0, 0xA0000, 1);
+       scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0xA0000, 0x20000, 0);
+       scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0xC0000, 0x40000, 1);
+       scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0, 1024*1024, 0);
+
+       scan_rom("/sys/devices", "rom");
+}
diff --git a/Documentation/ia64/aliasing.txt b/Documentation/ia64/aliasing.txt
index 38f9a52d182030eaf801ec494ef10f416c8c1a04..9a431a7d0f5d6c61d45687d580137f04878ca4a7 100644 (file)
@@ -112,16 +112,6 @@ POTENTIAL ATTRIBUTE ALIASING CASES
 
        The /dev/mem mmap constraints apply.
 
-       However, since this is for mapping legacy MMIO space, WB access
-       does not make sense.  This matters on machines without legacy
-       VGA support: these machines may have WB memory for the entire
-       first megabyte (or even the entire first granule).
-
-       On these machines, we could mmap legacy_mem as WB, which would
-       be safe in terms of attribute aliasing, but X has no way of
-       knowing that it is accessing regular memory, not a frame buffer,
-       so the kernel should fail the mmap rather than doing it with WB.
-
     read/write of /dev/mem
 
        This uses copy_from_user(), which implicitly uses a kernel
@@ -138,14 +128,20 @@ POTENTIAL ATTRIBUTE ALIASING CASES
 
     ioremap()
 
-       This returns a kernel identity mapping for use inside the
-       kernel.
+       This returns a mapping for use inside the kernel.
 
        If the region is in kern_memmap, we should use the attribute
-       specified there.  Otherwise, if the EFI memory map reports that
-       the entire granule supports WB, we should use that (granules
-       that are partially reserved or occupied by firmware do not appear
-       in kern_memmap).  Otherwise, we should use a UC mapping.
+       specified there.
+
+       If the EFI memory map reports that the entire granule supports
+       WB, we should use that (granules that are partially reserved
+       or occupied by firmware do not appear in kern_memmap).
+
+       If the granule contains non-WB memory, but we can cover the
+       region safely with kernel page table mappings, we can use
+       ioremap_page_range() as most other architectures do.
+
+       Failing all of the above, we have to fall back to a UC mapping.
 
 PAST PROBLEM CASES
 
@@ -158,7 +154,7 @@ PAST PROBLEM CASES
       succeed.  It may create either WB or UC user mappings, depending
       on whether the region is in kern_memmap or the EFI memory map.
 
-    mmap of 0x0-0xA0000 /dev/mem by "hwinfo" on HP sx1000 with VGA enabled
+    mmap of 0x0-0x9FFFF /dev/mem by "hwinfo" on HP sx1000 with VGA enabled
 
       See https://bugzilla.novell.com/show_bug.cgi?id=140858.
 
@@ -171,28 +167,25 @@ PAST PROBLEM CASES
       so it is safe to use WB mappings.
 
       The kernel VGA driver may ioremap the VGA frame buffer at 0xA0000,
-      which will use a granule-sized UC mapping covering 0-0xFFFFF.  This
-      granule covers some WB-only memory, but since UC is non-speculative,
-      the processor will never generate an uncacheable reference to the
-      WB-only areas unless the driver explicitly touches them.
+      which uses a granule-sized UC mapping.  This granule will cover some
+      WB-only memory, but since UC is non-speculative, the processor will
+      never generate an uncacheable reference to the WB-only areas unless
+      the driver explicitly touches them.
 
     mmap of 0x0-0xFFFFF legacy_mem by "X"
 
-      If the EFI memory map reports this entire range as WB, there
-      is no VGA MMIO hole, and the mmap should fail or be done with
-      a WB mapping.
+      If the EFI memory map reports that the entire range supports the
+      same attributes, we can allow the mmap (and we will prefer WB if
+      supported, as is the case with HP sx[12]000 machines with VGA
+      disabled).
 
-      There's no easy way for X to determine whether the 0xA0000-0xBFFFF
-      region is a frame buffer or just memory, so I think it's best to
-      just fail this mmap request rather than using a WB mapping.  As
-      far as I know, there's no need to map legacy_mem with WB
-      mappings.
+      If EFI reports the range as partly WB and partly UC (as on sx[12]000
+      machines with VGA enabled), we must fail the mmap because there's no
+      safe attribute to use.
 
-      Otherwise, a UC mapping of the entire region is probably safe.
-      The VGA hole means the region will not be in kern_memmap.  The
-      HP sx1000 chipset doesn't support UC access to the memory surrounding
-      the VGA hole, but X doesn't need that area anyway and should not
-      reference it.
+      If EFI reports some of the range but not all (as on Intel firmware
+      that doesn't report the VGA frame buffer at all), we should fail the
+      mmap and force the user to map just the specific region of interest.
 
     mmap of 0xA0000-0xBFFFF legacy_mem by "X" on HP sx1000 with VGA disabled
 
@@ -202,6 +195,16 @@ PAST PROBLEM CASES
       This is a special case of the previous case, and the mmap should
       fail for the same reason as above.
 
+    read of /sys/devices/.../rom
+
+      For VGA devices, this may cause an ioremap() of 0xC0000.  This
+      used to be done with a UC mapping, because the VGA frame buffer
+      at 0xA0000 prevents use of a WB granule.  The UC mapping causes
+      an MCA on HP sx[12]000 chipsets.
+
+      We should use WB page table mappings to avoid covering the VGA
+      frame buffer.
+
 NOTES
 
     [1] SDM rev 2.2, vol 2, sec 4.4.1.
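
The ioremap() attribute-selection order described in the hunk above (kern_memmap attribute first, then a whole-granule WB check against the EFI memory map, then kernel page table mappings, then a UC fallback) can be summarized as a small decision function. The sketch below is an editor's illustration, not the arch/ia64/mm/ioremap.c code from this commit (that diff is not shown in this section); both attribute lookups are stubbed, and only the EFI_MEMORY_* values follow the EFI specification.

/*
 * Illustrative sketch of the ioremap() attribute-selection order
 * described in aliasing.txt above.  Stand-alone user-space model.
 */
#include <stdio.h>

#define EFI_MEMORY_UC 0x1UL     /* uncacheable */
#define EFI_MEMORY_WB 0x8UL     /* write-back cacheable */

/* Stub: attribute recorded in kern_memmap, or 0 if the region is not there. */
static unsigned long kern_memmap_attribute(unsigned long phys, unsigned long size)
{
        (void)phys; (void)size;
        return 0;
}

/* Stub: attribute supported by the whole region per the EFI memory map. */
static unsigned long efi_memmap_attribute(unsigned long phys, unsigned long size)
{
        (void)phys; (void)size;
        return EFI_MEMORY_WB;
}

static const char *ioremap_attribute(unsigned long phys, unsigned long size)
{
        unsigned long attr = kern_memmap_attribute(phys, size);

        /* 1. Region is in kern_memmap: use the attribute recorded there. */
        if (attr & EFI_MEMORY_WB)
                return "WB (attribute from kern_memmap)";
        if (attr & EFI_MEMORY_UC)
                return "UC (attribute from kern_memmap)";

        /* 2. EFI says the entire granule supports WB: map it WB. */
        if (efi_memmap_attribute(phys, size) & EFI_MEMORY_WB)
                return "WB (entire granule supports WB)";

        /*
         * 3. A partial-granule region could still get a WB mapping through
         *    kernel page tables (ioremap_page_range()); failing that, fall
         *    back to an uncacheable mapping.
         */
        return "UC (fallback)";
}

int main(void)
{
        printf("0xc0000-0xfffff -> %s\n", ioremap_attribute(0xc0000, 0x40000));
        return 0;
}
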
diff --git a/Documentation/ia64/err_inject.txt b/Documentation/ia64/err_inject.txt
new file mode 100644 (file)
index 0000000..6449a70
--- /dev/null
@@ -0,0 +1,1068 @@
+
+IPF Machine Check (MC) error inject tool
+========================================
+
+The IPF Machine Check (MC) error injection tool is used to inject MC
+errors from Linux. The tool is a test bed for the IPF MC work flow, including
+hardware correctable error handling, OS recoverable error handling, MC
+event logging, etc.
+
+The tool includes two parts: a kernel driver and a sample user
+application. The driver provides an interface to PAL for injecting errors
+and querying error injection capabilities. The driver code is in
+arch/ia64/kernel/err_inject.c. The sample application (shown below)
+builds combinations of various errors and calls the driver's sysfs
+interface to inject errors or query error injection capabilities.
+
+The tool can be used to test the MC handling capabilities of Intel IPF
+machines. It's especially useful for people who cannot access a hardware
+MC injection tool. It can also be integrated with other software test
+suites to do stress testing on IPF.
+
+Below is a sample application that is part of the whole tool. The sample
+can be used as a working test tool as-is, it can be expanded to include
+more features, or it can be integrated into a library or other user
+application for more thorough testing.
+
+The sample application takes err.conf as its error configuration input
+and is compiled with gcc. After you install the err_inject driver, you can
+run this sample application to inject errors.
+
+Errata: the Itanium 2 Processors Specification Update lists some errata against
+the pal_mc_error_inject PAL procedure. The following err.conf has been tested
+on the latest Montecito PAL.
+
+err.conf:
+
+#This is the configuration file for err_inject_tool.
+#The format of each line is:
+#cpu, loop, interval, err_type_info, err_struct_info, err_data_buffer
+#where
+#      cpu: logical cpu number the error will be injected on.
+#      loop: number of times the error will be injected.
+#      interval: in seconds; one error is injected per interval.
+#      err_type_info, err_struct_info: PAL parameters.
+#
+#Note: all values are hex, with or without a 0x prefix.
+
+
+#On cpu2, inject only a total of 0x10 errors, interval 5 seconds,
+#corrected, data cache, hier-2, physical addr (assigned by tool code).
+#Works on the latest Montecito PAL.
+2, 10, 5, 4101, 95
+
+#On cpu4, inject and consume a total of 0x10 errors, interval 5 seconds,
+#corrected, data cache, hier-2, physical addr (assigned by tool code).
+#Works on the latest Montecito PAL.
+4, 10, 5, 4109, 95
+
+#On cpu15, inject and consume a total of 0x10 errors, interval 5 seconds,
+#recoverable, DTR0, hier-2.
+#Works on the latest Montecito PAL.
+0xf, 0x10, 5, 4249, 15
+
+The sample application source code:
+
+err_injection_tool.c:
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright (C) 2006 Intel Co
+ *     Fenghua Yu <fenghua.yu@intel.com>
+ *
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sched.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/ipc.h>
+#include <sys/sem.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+
+#define MAX_FN_SIZE            256
+#define MAX_BUF_SIZE           256
+#define DATA_BUF_SIZE          256
+#define NR_CPUS                512
+#define MAX_TASK_NUM           2048
+#define MIN_INTERVAL           5       // seconds
+#define        ERR_DATA_BUFFER_SIZE    3       // Three 8-byte.
+#define PARA_FIELD_NUM         5
+#define MASK_SIZE              (NR_CPUS/64)
+#define PATH_FORMAT "/sys/devices/system/cpu/cpu%d/err_inject/"
+
+int sched_setaffinity(pid_t pid, unsigned int len, unsigned long *mask);
+
+int verbose;
+#define vbprintf if (verbose) printf
+
+int log_info(int cpu, const char *fmt, ...)
+{
+       FILE *log;
+       char fn[MAX_FN_SIZE];
+       char buf[MAX_BUF_SIZE];
+       va_list args;
+
+       sprintf(fn, "%d.log", cpu);
+       log=fopen(fn, "a+");
+       if (log==NULL) {
+               perror("Error open:");
+               return -1;
+       }
+
+       va_start(args, fmt);
+       vprintf(fmt, args);
+       va_end(args);
+
+       /* Restart the va_list; reusing a consumed one is undefined. */
+       va_start(args, fmt);
+       memset(buf, 0, MAX_BUF_SIZE);
+       vsprintf(buf, fmt, args);
+       va_end(args);
+
+       fwrite(buf, strlen(buf), 1, log);
+       fclose(log);
+
+       return 0;
+}
+
+typedef unsigned long u64;
+typedef unsigned int  u32;
+
+typedef union err_type_info_u {
+       struct {
+               u64     mode            : 3,    /* 0-2 */
+                       err_inj         : 3,    /* 3-5 */
+                       err_sev         : 2,    /* 6-7 */
+                       err_struct      : 5,    /* 8-12 */
+                       struct_hier     : 3,    /* 13-15 */
+                       reserved        : 48;   /* 16-63 */
+       } err_type_info_u;
+       u64     err_type_info;
+} err_type_info_t;
+
+typedef union err_struct_info_u {
+       struct {
+               u64     siv             : 1,    /* 0     */
+                       c_t             : 2,    /* 1-2   */
+                       cl_p            : 3,    /* 3-5   */
+                       cl_id           : 3,    /* 6-8   */
+                       cl_dp           : 1,    /* 9     */
+                       reserved1       : 22,   /* 10-31 */
+                       tiv             : 1,    /* 32    */
+                       trigger         : 4,    /* 33-36 */
+                       trigger_pl      : 3,    /* 37-39 */
+                       reserved2       : 24;   /* 40-63 */
+       } err_struct_info_cache;
+       struct {
+               u64     siv             : 1,    /* 0     */
+                       tt              : 2,    /* 1-2   */
+                       tc_tr           : 2,    /* 3-4   */
+                       tr_slot         : 8,    /* 5-12  */
+                       reserved1       : 19,   /* 13-31 */
+                       tiv             : 1,    /* 32    */
+                       trigger         : 4,    /* 33-36 */
+                       trigger_pl      : 3,    /* 37-39 */
+                       reserved2       : 24;   /* 40-63 */
+       } err_struct_info_tlb;
+       struct {
+               u64     siv             : 1,    /* 0     */
+                       regfile_id      : 4,    /* 1-4   */
+                       reg_num         : 7,    /* 5-11  */
+                       reserved1       : 20,   /* 12-31 */
+                       tiv             : 1,    /* 32    */
+                       trigger         : 4,    /* 33-36 */
+                       trigger_pl      : 3,    /* 37-39 */
+                       reserved2       : 24;   /* 40-63 */
+       } err_struct_info_register;
+       struct {
+               u64     reserved;
+       } err_struct_info_bus_processor_interconnect;
+       u64     err_struct_info;
+} err_struct_info_t;
+
+typedef union err_data_buffer_u {
+       struct {
+               u64     trigger_addr;           /* 0-63         */
+               u64     inj_addr;               /* 64-127       */
+               u64     way             : 5,    /* 128-132      */
+                       index           : 20,   /* 133-152      */
+                                       : 39;   /* 153-191      */
+       } err_data_buffer_cache;
+       struct {
+               u64     trigger_addr;           /* 0-63         */
+               u64     inj_addr;               /* 64-127       */
+               u64     way             : 5,    /* 128-132      */
+                       index           : 20,   /* 133-152      */
+                       reserved        : 39;   /* 153-191      */
+       } err_data_buffer_tlb;
+       struct {
+               u64     trigger_addr;           /* 0-63         */
+       } err_data_buffer_register;
+       struct {
+               u64     reserved;               /* 0-63         */
+       } err_data_buffer_bus_processor_interconnect;
+       u64 err_data_buffer[ERR_DATA_BUFFER_SIZE];
+} err_data_buffer_t;
+
+typedef union capabilities_u {
+       struct {
+               u64     i               : 1,
+                       d               : 1,
+                       rv              : 1,
+                       tag             : 1,
+                       data            : 1,
+                       mesi            : 1,
+                       dp              : 1,
+                       reserved1       : 3,
+                       pa              : 1,
+                       va              : 1,
+                       wi              : 1,
+                       reserved2       : 20,
+                       trigger         : 1,
+                       trigger_pl      : 1,
+                       reserved3       : 30;
+       } capabilities_cache;
+       struct {
+               u64     d               : 1,
+                       i               : 1,
+                       rv              : 1,
+                       tc              : 1,
+                       tr              : 1,
+                       reserved1       : 27,
+                       trigger         : 1,
+                       trigger_pl      : 1,
+                       reserved2       : 30;
+       } capabilities_tlb;
+       struct {
+               u64     gr_b0           : 1,
+                       gr_b1           : 1,
+                       fr              : 1,
+                       br              : 1,
+                       pr              : 1,
+                       ar              : 1,
+                       cr              : 1,
+                       rr              : 1,
+                       pkr             : 1,
+                       dbr             : 1,
+                       ibr             : 1,
+                       pmc             : 1,
+                       pmd             : 1,
+                       reserved1       : 3,
+                       regnum          : 1,
+                       reserved2       : 15,
+                       trigger         : 1,
+                       trigger_pl      : 1,
+                       reserved3       : 30;
+       } capabilities_register;
+       struct {
+               u64     reserved;
+       } capabilities_bus_processor_interconnect;
+} capabilities_t;
+
+typedef struct resources_s {
+       u64     ibr0            : 1,
+               ibr2            : 1,
+               ibr4            : 1,
+               ibr6            : 1,
+               dbr0            : 1,
+               dbr2            : 1,
+               dbr4            : 1,
+               dbr6            : 1,
+               reserved        : 48;
+} resources_t;
+
+
+long get_page_size(void)
+{
+       long page_size=sysconf(_SC_PAGESIZE);
+       return page_size;
+}
+
+#define PAGE_SIZE (get_page_size()==-1?0x4000:get_page_size())
+#define SHM_SIZE (2*PAGE_SIZE*NR_CPUS)
+#define SHM_VA 0x2000000100000000
+
+int shmid;
+void *shmaddr;
+
+int create_shm(void)
+{
+       key_t key;
+       char fn[MAX_FN_SIZE];
+
+       /* cpu0 is always existing */
+       sprintf(fn, PATH_FORMAT, 0);
+       if ((key = ftok(fn, 's')) == -1) {
+               perror("ftok");
+               return -1;
+       }
+
+       shmid = shmget(key, SHM_SIZE, 0644 | IPC_CREAT);
+       if (shmid == -1) {
+               if (errno==EEXIST) {
+                       shmid = shmget(key, SHM_SIZE, 0);
+                       if (shmid == -1) {
+                               perror("shmget");
+                               return -1;
+                       }
+               }
+               else {
+                       perror("shmget");
+                       return -1;
+               }
+       }
+       vbprintf("shmid=%d", shmid);
+
+       /* connect to the segment: */
+       shmaddr = shmat(shmid, (void *)SHM_VA, 0);
+       if (shmaddr == (void*)-1) {
+               perror("shmat");
+               return -1;
+       }
+
+       memset(shmaddr, 0, SHM_SIZE);
+       mlock(shmaddr, SHM_SIZE);
+
+       return 0;
+}
+
+int free_shm()
+{
+       munlock(shmaddr, SHM_SIZE);
+        shmdt(shmaddr);
+       shmctl(shmid, IPC_RMID, NULL);
+
+       return 0;
+}
+
+#ifdef _SEM_SEMUN_UNDEFINED
+union semun
+{
+       int val;
+       struct semid_ds *buf;
+       unsigned short int *array;
+       struct seminfo *__buf;
+};
+#endif
+
+u32 mode=1; /* 1: physical mode; 2: virtual mode. */
+int one_lock=1;
+key_t key[NR_CPUS];
+int semid[NR_CPUS];
+
+int create_sem(int cpu)
+{
+       union semun arg;
+       char fn[MAX_FN_SIZE];
+       int sid;
+
+       sprintf(fn, PATH_FORMAT, cpu);
+       strcat(fn, "err_type_info");
+       if ((key[cpu] = ftok(fn, 'e')) == -1) {
+               perror("ftok");
+               return -1;
+       }
+
+       if (semid[cpu]!=0)
+               return 0;
+
+       /* clear old semaphore */
+       if ((sid = semget(key[cpu], 1, 0)) != -1)
+               semctl(sid, 0, IPC_RMID);
+
+       /* get one semaphore */
+       if ((semid[cpu] = semget(key[cpu], 1, IPC_CREAT | IPC_EXCL)) == -1) {
+               perror("semget");
+               printf("Please remove semaphore with key=0x%lx, then run the tool.\n",
+                       (u64)key[cpu]);
+               return -1;
+       }
+
+       vbprintf("semid[%d]=0x%lx, key[%d]=%lx\n",cpu,(u64)semid[cpu],cpu,
+               (u64)key[cpu]);
+       /* initialize the semaphore to 1: */
+       arg.val = 1;
+       if (semctl(semid[cpu], 0, SETVAL, arg) == -1) {
+               perror("semctl");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int lock(int cpu)
+{
+       struct sembuf lock;
+
+       lock.sem_num = cpu;
+       lock.sem_op = 1;
+       semop(semid[cpu], &lock, 1);
+
+        return 0;
+}
+
+static int unlock(int cpu)
+{
+       struct sembuf unlock;
+
+       unlock.sem_num = cpu;
+       unlock.sem_op = -1;
+       semop(semid[cpu], &unlock, 1);
+
+        return 0;
+}
+
+void free_sem(int cpu)
+{
+       semctl(semid[cpu], 0, IPC_RMID);
+}
+
+int wr_multi(char *fn, unsigned long *data, int size)
+{
+       int fd;
+       char buf[MAX_BUF_SIZE];
+       int ret;
+
+       if (size==1)
+               sprintf(buf, "%lx", *data);
+       else if (size==3)
+               sprintf(buf, "%lx,%lx,%lx", data[0], data[1], data[2]);
+       else {
+               fprintf(stderr,"write to file with wrong size!\n");
+               return -1;
+       }
+
+       fd=open(fn, O_RDWR);
+       if (fd < 0) {
+               perror("Error:");
+               return -1;
+       }
+       ret=write(fd, buf, sizeof(buf));
+       close(fd);
+       return ret;
+}
+
+int wr(char *fn, unsigned long data)
+{
+       return wr_multi(fn, &data, 1);
+}
+
+int rd(char *fn, unsigned long *data)
+{
+       int fd;
+       char buf[MAX_BUF_SIZE];
+
+       fd=open(fn, O_RDONLY);
+       if (fd<0) {
+               perror("Error:");
+               return -1;
+       }
+       read(fd, buf, MAX_BUF_SIZE);
+       *data=strtoul(buf, NULL, 16);
+       close(fd);
+       return 0;
+}
+
+int rd_status(char *path, int *status)
+{
+       char fn[MAX_FN_SIZE];
+       sprintf(fn, "%s/status", path);
+       if (rd(fn, (u64*)status)<0) {
+               perror("status reading error.\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+int rd_capabilities(char *path, u64 *capabilities)
+{
+       char fn[MAX_FN_SIZE];
+       sprintf(fn, "%s/capabilities", path);
+       if (rd(fn, capabilities)<0) {
+               perror("capabilities reading error.\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+int rd_all(char *path)
+{
+       unsigned long err_type_info, err_struct_info, err_data_buffer;
+       int status;
+       unsigned long capabilities, resources;
+       char fn[MAX_FN_SIZE];
+
+       sprintf(fn, "%s/err_type_info", path);
+       if (rd(fn, &err_type_info)<0) {
+               perror("err_type_info reading error.\n");
+               return -1;
+       }
+       printf("err_type_info=%lx\n", err_type_info);
+
+       sprintf(fn, "%s/err_struct_info", path);
+       if (rd(fn, &err_struct_info)<0) {
+               perror("err_struct_info reading error.\n");
+               return -1;
+       }
+       printf("err_struct_info=%lx\n", err_struct_info);
+
+       sprintf(fn, "%s/err_data_buffer", path);
+       if (rd(fn, &err_data_buffer)<0) {
+               perror("err_data_buffer reading error.\n");
+               return -1;
+       }
+       printf("err_data_buffer=%lx\n", err_data_buffer);
+
+       sprintf(fn, "%s/status", path);
+       if (rd(fn, (u64*)&status)<0) {
+               perror("status reading error.\n");
+               return -1;
+       }
+       printf("status=%d\n", status);
+
+       sprintf(fn, "%s/capabilities", path);
+       if (rd(fn,&capabilities)<0) {
+               perror("capabilities reading error.\n");
+               return -1;
+       }
+       printf("capabilities=%lx\n", capabilities);
+
+       sprintf(fn, "%s/resources", path);
+       if (rd(fn, &resources)<0) {
+               perror("resources reading error.\n");
+               return -1;
+       }
+       printf("resources=%lx\n", resources);
+
+       return 0;
+}
+
+int query_capabilities(char *path, err_type_info_t err_type_info,
+                       u64 *capabilities)
+{
+       char fn[MAX_FN_SIZE];
+       err_struct_info_t err_struct_info;
+       err_data_buffer_t err_data_buffer;
+
+       err_struct_info.err_struct_info=0;
+       memset(err_data_buffer.err_data_buffer, -1, ERR_DATA_BUFFER_SIZE*8);
+
+       sprintf(fn, "%s/err_type_info", path);
+       wr(fn, err_type_info.err_type_info);
+       sprintf(fn, "%s/err_struct_info", path);
+       wr(fn, 0x0);
+       sprintf(fn, "%s/err_data_buffer", path);
+       wr_multi(fn, err_data_buffer.err_data_buffer, ERR_DATA_BUFFER_SIZE);
+
+       // Fire pal_mc_error_inject procedure.
+       sprintf(fn, "%s/call_start", path);
+       wr(fn, mode);
+
+       if (rd_capabilities(path, capabilities)<0)
+               return -1;
+
+       return 0;
+}
+
+int query_all_capabilities()
+{
+       int status;
+       err_type_info_t err_type_info;
+       int err_sev, err_struct, struct_hier;
+       int cap=0;
+       u64 capabilities;
+       char path[MAX_FN_SIZE];
+
+       err_type_info.err_type_info=0;                  // Initial
+       err_type_info.err_type_info_u.mode=0;           // Query mode;
+       err_type_info.err_type_info_u.err_inj=0;
+
+       printf("All capabilities implemented in pal_mc_error_inject:\n");
+       sprintf(path, PATH_FORMAT ,0);
+       for (err_sev=0;err_sev<3;err_sev++)
+               for (err_struct=0;err_struct<5;err_struct++)
+                       for (struct_hier=0;struct_hier<5;struct_hier++)
+       {
+               status=-1;
+               capabilities=0;
+               err_type_info.err_type_info_u.err_sev=err_sev;
+               err_type_info.err_type_info_u.err_struct=err_struct;
+               err_type_info.err_type_info_u.struct_hier=struct_hier;
+
+               if (query_capabilities(path, err_type_info, &capabilities)<0)
+                       continue;
+
+               if (rd_status(path, &status)<0)
+                       continue;
+
+               if (status==0) {
+                       cap=1;
+                       printf("For err_sev=%d, err_struct=%d, struct_hier=%d: ",
+                               err_sev, err_struct, struct_hier);
+                       printf("capabilities 0x%lx\n", capabilities);
+               }
+       }
+       if (!cap) {
+               printf("No capabilities supported.\n");
+               return 0;
+       }
+
+       return 0;
+}
+
+int err_inject(int cpu, char *path, err_type_info_t err_type_info,
+               err_struct_info_t err_struct_info,
+               err_data_buffer_t err_data_buffer)
+{
+       int status;
+       char fn[MAX_FN_SIZE];
+
+       log_info(cpu, "err_type_info=%lx, err_struct_info=%lx, ",
+               err_type_info.err_type_info,
+               err_struct_info.err_struct_info);
+       log_info(cpu,"err_data_buffer=[%lx,%lx,%lx]\n",
+               err_data_buffer.err_data_buffer[0],
+               err_data_buffer.err_data_buffer[1],
+               err_data_buffer.err_data_buffer[2]);
+       sprintf(fn, "%s/err_type_info", path);
+       wr(fn, err_type_info.err_type_info);
+       sprintf(fn, "%s/err_struct_info", path);
+       wr(fn, err_struct_info.err_struct_info);
+       sprintf(fn, "%s/err_data_buffer", path);
+       wr_multi(fn, err_data_buffer.err_data_buffer, ERR_DATA_BUFFER_SIZE);
+
+       // Fire pal_mc_error_inject procedure.
+       sprintf(fn, "%s/call_start", path);
+       wr(fn,mode);
+
+       if (rd_status(path, &status)<0) {
+               vbprintf("fail: read status\n");
+               return -100;
+       }
+
+       if (status!=0) {
+               log_info(cpu, "fail: status=%d\n", status);
+               return status;
+       }
+
+       return status;
+}
+
+static int construct_data_buf(char *path, err_type_info_t err_type_info,
+               err_struct_info_t err_struct_info,
+               err_data_buffer_t *err_data_buffer,
+               void *va1)
+{
+       char fn[MAX_FN_SIZE];
+       u64 virt_addr=0, phys_addr=0;
+
+       vbprintf("va1=%lx\n", (u64)va1);
+       memset(&err_data_buffer->err_data_buffer_cache, 0, ERR_DATA_BUFFER_SIZE*8);
+
+       switch (err_type_info.err_type_info_u.err_struct) {
+               case 1: // Cache
+                       switch (err_struct_info.err_struct_info_cache.cl_id) {
+                               case 1: //Virtual addr
+                                       err_data_buffer->err_data_buffer_cache.inj_addr=(u64)va1;
+                                       break;
+                               case 2: //Phys addr
+                                       sprintf(fn, "%s/virtual_to_phys", path);
+                                       virt_addr=(u64)va1;
+                                       if (wr(fn,virt_addr)<0)
+                                               return -1;
+                                       rd(fn, &phys_addr);
+                                       err_data_buffer->err_data_buffer_cache.inj_addr=phys_addr;
+                                       break;
+                               default:
+                                       printf("Not supported cl_id\n");
+                                       break;
+                       }
+                       break;
+               case 2: //  TLB
+                       break;
+               case 3: //  Register file
+                       break;
+               case 4: //  Bus/system interconnect
+               default:
+                       printf("Not supported err_struct\n");
+                       break;
+       }
+
+       return 0;
+}
+
+typedef struct {
+       u64 cpu;
+       u64 loop;
+       u64 interval;
+       u64 err_type_info;
+       u64 err_struct_info;
+       u64 err_data_buffer[ERR_DATA_BUFFER_SIZE];
+} parameters_t;
+
+parameters_t line_para;
+int para;
+
+static int empty_data_buffer(u64 *err_data_buffer)
+{
+       int empty=1;
+       int i;
+
+       for (i=0;i<ERR_DATA_BUFFER_SIZE; i++)
+          if (err_data_buffer[i]!=-1)
+               empty=0;
+
+       return empty;
+}
+
+int err_inj()
+{
+       err_type_info_t err_type_info;
+       err_struct_info_t err_struct_info;
+       err_data_buffer_t err_data_buffer;
+       int count;
+       FILE *fp;
+       unsigned long cpu, loop, interval, err_type_info_conf, err_struct_info_conf;
+       u64 err_data_buffer_conf[ERR_DATA_BUFFER_SIZE];
+       int num;
+       int i;
+       char path[MAX_FN_SIZE];
+       parameters_t parameters[MAX_TASK_NUM]={};
+       pid_t child_pid[MAX_TASK_NUM];
+       time_t current_time;
+       int status;
+
+       if (!para) {
+           fp=fopen("err.conf", "r");
+           if (fp==NULL) {
+               perror("Error open err.conf");
+               return -1;
+           }
+
+           num=0;
+           while (!feof(fp)) {
+               char buf[256];
+               memset(buf,0,256);
+               fgets(buf, 256, fp);
+               count=sscanf(buf, "%lx, %lx, %lx, %lx, %lx, %lx, %lx, %lx\n",
+                               &cpu, &loop, &interval,&err_type_info_conf,
+                               &err_struct_info_conf,
+                               &err_data_buffer_conf[0],
+                               &err_data_buffer_conf[1],
+                               &err_data_buffer_conf[2]);
+               if (count!=PARA_FIELD_NUM+3) {
+                       err_data_buffer_conf[0]=-1;
+                       err_data_buffer_conf[1]=-1;
+                       err_data_buffer_conf[2]=-1;
+                       count=sscanf(buf, "%lx, %lx, %lx, %lx, %lx\n",
+                               &cpu, &loop, &interval,&err_type_info_conf,
+                               &err_struct_info_conf);
+                       if (count!=PARA_FIELD_NUM)
+                               continue;
+               }
+
+               parameters[num].cpu=cpu;
+               parameters[num].loop=loop;
+               parameters[num].interval= interval>MIN_INTERVAL
+                                         ?interval:MIN_INTERVAL;
+               parameters[num].err_type_info=err_type_info_conf;
+               parameters[num].err_struct_info=err_struct_info_conf;
+               memcpy(parameters[num++].err_data_buffer,
+                       err_data_buffer_conf,ERR_DATA_BUFFER_SIZE*8) ;
+
+               if (num>=MAX_TASK_NUM)
+                       break;
+           }
+       }
+       else {
+               parameters[0].cpu=line_para.cpu;
+               parameters[0].loop=line_para.loop;
+               parameters[0].interval= line_para.interval>MIN_INTERVAL
+                                         ?line_para.interval:MIN_INTERVAL;
+               parameters[0].err_type_info=line_para.err_type_info;
+               parameters[0].err_struct_info=line_para.err_struct_info;
+               memcpy(parameters[0].err_data_buffer,
+                       line_para.err_data_buffer,ERR_DATA_BUFFER_SIZE*8) ;
+
+               num=1;
+       }
+
+       /* Create semaphore: If one_lock, one semaphore for all processors.
+          Otherwise, one semaphore for each processor. */
+       if (one_lock) {
+               if (create_sem(0)) {
+                       printf("Can not create semaphore...exit\n");
+                       free_sem(0);
+                       return -1;
+               }
+       }
+       else {
+               for (i=0;i<num;i++) {
+                  if (create_sem(parameters[i].cpu)) {
+                       printf("Can not create semaphore for cpu%d...exit\n",
+                               (int)parameters[i].cpu);
+                       free_sem(parameters[i].cpu);
+                       return -1;
+                  }
+               }
+       }
+
+       /* Create a shm segment which will be used to inject/consume errors on.*/
+       if (create_shm()==-1) {
+               printf("Error to create shm...exit\n");
+               return -1;
+       }
+
+       for (i=0;i<num;i++) {
+               pid_t pid;
+
+               current_time=time(NULL);
+               log_info(parameters[i].cpu, "\nBegin at %s", ctime(&current_time));
+               log_info(parameters[i].cpu, "Configurations:\n");
+               log_info(parameters[i].cpu,"On cpu%ld: loop=%lx, interval=%lx(s)",
+                       parameters[i].cpu,
+                       parameters[i].loop,
+                       parameters[i].interval);
+               log_info(parameters[i].cpu," err_type_info=%lx,err_struct_info=%lx\n",
+                       parameters[i].err_type_info,
+                       parameters[i].err_struct_info);
+
+               sprintf(path, PATH_FORMAT, (int)parameters[i].cpu);
+               err_type_info.err_type_info=parameters[i].err_type_info;
+               err_struct_info.err_struct_info=parameters[i].err_struct_info;
+               memcpy(err_data_buffer.err_data_buffer,
+                       parameters[i].err_data_buffer,
+                       ERR_DATA_BUFFER_SIZE*8);
+
+               pid=fork();
+               if (pid==0) {
+                       unsigned long mask[MASK_SIZE];
+                       int j, k;
+
+                       void *va1, *va2;
+
+                       /* Allocate two memory areas va1 and va2 in shm */
+                       va1=shmaddr+parameters[i].cpu*PAGE_SIZE;
+                       va2=shmaddr+parameters[i].cpu*PAGE_SIZE+PAGE_SIZE;
+
+                       vbprintf("va1=%lx, va2=%lx\n", (u64)va1, (u64)va2);
+                       memset(va1, 0x1, PAGE_SIZE);
+                       memset(va2, 0x2, PAGE_SIZE);
+
+                       if (empty_data_buffer(err_data_buffer.err_data_buffer))
+                               /* If not specified yet, construct data buffer
+                                * with va1
+                                */
+                               construct_data_buf(path, err_type_info,
+                                       err_struct_info, &err_data_buffer,va1);
+
+                       for (j=0;j<MASK_SIZE;j++)
+                               mask[j]=0;
+
+                       cpu=parameters[i].cpu;
+                       k = cpu%64;
+                       j = cpu/64;
+                       mask[j]=1UL<<k;
+
+                       if (sched_setaffinity(0, MASK_SIZE*8, mask)==-1) {
+                               perror("Error sched_setaffinity:");
+                               return -1;
+                       }
+
+                       for (j=0; j<parameters[i].loop; j++) {
+                               log_info(parameters[i].cpu,"Injection ");
+                               log_info(parameters[i].cpu,"on cpu%ld: #%d/%ld ",
+                                       parameters[i].cpu,j+1, parameters[i].loop);
+
+                               /* Hold the lock */
+                               if (one_lock)
+                                       lock(0);
+                               else
+                               /* Hold lock on this cpu */
+                                       lock(parameters[i].cpu);
+
+                               if ((status=err_inject(parameters[i].cpu,
+                                          path, err_type_info,
+                                          err_struct_info, err_data_buffer))
+                                          ==0) {
+                                       /* consume the error for "inject only"*/
+                                       memcpy(va2, va1, PAGE_SIZE);
+                                       memcpy(va1, va2, PAGE_SIZE);
+                                       log_info(parameters[i].cpu,
+                                               "successful\n");
+                               }
+                               else {
+                                       log_info(parameters[i].cpu,"fail:");
+                                       log_info(parameters[i].cpu,
+                                               "status=%d\n", status);
+                                       unlock(parameters[i].cpu);
+                                       break;
+                               }
+                               if (one_lock)
+                               /* Release the lock */
+                                       unlock(0);
+                               /* Release lock on this cpu */
+                               else
+                                       unlock(parameters[i].cpu);
+
+                               if (j < parameters[i].loop-1)
+                                       sleep(parameters[i].interval);
+                       }
+                       current_time=time(NULL);
+                       log_info(parameters[i].cpu, "Done at %s", ctime(&current_time));
+                       return 0;
+               }
+               else if (pid<0) {
+                       perror("Error fork:");
+                       continue;
+               }
+               child_pid[i]=pid;
+       }
+       for (i=0;i<num;i++)
+               waitpid(child_pid[i], NULL, 0);
+
+       if (one_lock)
+               free_sem(0);
+       else
+               for (i=0;i<num;i++)
+                       free_sem(parameters[i].cpu);
+
+       printf("All done.\n");
+
+       return 0;
+}
+
+void help()
+{
+       printf("err_inject_tool:\n");
+       printf("\t-q: query all capabilities. default: off\n");
+       printf("\t-m: procedure mode. 1: physical 2: virtual. default: 1\n");
+       printf("\t-i: inject errors. default: off\n");
+       printf("\t-l: one lock per cpu. default: one lock for all\n");
+       printf("\t-e: error parameters:\n");
+       printf("\t\tcpu,loop,interval,err_type_info,err_struct_info[,err_data_buffer[0],err_data_buffer[1],err_data_buffer[2]]\n");
+       printf("\t\t   cpu: logical cpu number the error will be injected on.\n");
+       printf("\t\t   loop: number of times the error will be injected.\n");
+       printf("\t\t   interval: in seconds; one error is injected per interval.\n");
+       printf("\t\t   err_type_info, err_struct_info: PAL parameters.\n");
+       printf("\t\t   err_data_buffer: PAL parameter. Optional. If not present,\n");
+       printf("\t\t                    it's constructed by the tool automatically. Be\n");
+       printf("\t\t                    careful when providing err_data_buffer and make\n");
+       printf("\t\t                    sure it works with the environment.\n");
+       printf("\t    Note: no spaces between error parameters.\n");
+       printf("\t    default: Take error parameters from err.conf instead of command line.\n");
+       printf("\t-v: verbose. default: off\n");
+       printf("\t-h: help\n\n");
+       printf("The tool takes the err.conf file as ");
+       printf("input to inject single or multiple errors ");
+       printf("on one or multiple cpus in parallel.\n");
+}
+
+int main(int argc, char **argv)
+{
+       char c;
+       int do_err_inj=0;
+       int do_query_all=0;
+       int count;
+       u32 m;
+
+       /* Default one lock for all cpu's */
+       one_lock=1;
+       while ((c = getopt(argc, argv, "m:iqvhle:")) != EOF)
+               switch (c) {
+                       case 'm':       /* Procedure mode. 1: phys 2: virt */
+                               count=sscanf(optarg, "%x", &m);
+                               if (count!=1 || (m!=1 && m!=2)) {
+                                       printf("Wrong mode number.\n");
+                                       help();
+                                       return -1;
+                               }
+                               mode=m;
+                               break;
+                       case 'i':       /* Inject errors */
+                               do_err_inj=1;
+                               break;
+                       case 'q':       /* Query */
+                               do_query_all=1;
+                               break;
+                       case 'v':       /* Verbose */
+                               verbose=1;
+                               break;
+                       case 'l':       /* One lock per cpu */
+                               one_lock=0;
+                               break;
+                       case 'e':       /* error arguments */
+                               /* Take parameters:
+                                * #cpu, loop, interval, err_type_info, err_struct_info[, err_data_buffer]
+                                * err_data_buffer is optional. It is recommended not to
+                                * specify err_data_buffer; it is better to let the tool
+                                * generate it.
+                                */
+                               count=sscanf(optarg,
+                                       "%lx, %lx, %lx, %lx, %lx, %lx, %lx, %lx\n",
+                                       &line_para.cpu,
+                                       &line_para.loop,
+                                       &line_para.interval,
+                                       &line_para.err_type_info,
+                                       &line_para.err_struct_info,
+                                       &line_para.err_data_buffer[0],
+                                       &line_para.err_data_buffer[1],
+                                       &line_para.err_data_buffer[2]);
+                               if (count!=PARA_FIELD_NUM+3) {
+                                   line_para.err_data_buffer[0]=-1,
+                                   line_para.err_data_buffer[1]=-1,
+                                   line_para.err_data_buffer[2]=-1;
+                                   count=sscanf(optarg, "%lx, %lx, %lx, %lx, %lx\n",
+                                               &line_para.cpu,
+                                               &line_para.loop,
+                                               &line_para.interval,
+                                               &line_para.err_type_info,
+                                               &line_para.err_struct_info);
+                                   if (count!=PARA_FIELD_NUM) {
+                                       printf("Wrong error arguments.\n");
+                                       help();
+                                       return -1;
+                                   }
+                               }
+                               para=1;
+                               break;
+                       case 'h':
+                               help();
+                               return 0;
+                       default:
+                               break;
+               }
+
+       if (do_query_all)
+               query_all_capabilities();
+       if (do_err_inj)
+               err_inj();
+
+       if (!do_query_all &&  !do_err_inj)
+               help();
+
+       return 0;
+}
+
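
For orientation, here is a much smaller sketch of driving the driver's sysfs interface described at the top of err_inject.txt: it performs a single capability query on cpu0 and prints the result. This is an editor's illustration, not part of the commit; it hard-codes the cpu0 path, omits the err_data_buffer write that the full sample performs before call_start, does no locking or forking, parses values as hex the way the sample's rd() does, and assumes the err_inject driver is loaded.

/*
 * Minimal query of the err_inject sysfs interface (editor's sketch).
 * Attribute names (err_type_info, err_struct_info, call_start, status,
 * capabilities) come from the sample application above.
 */
#include <stdio.h>
#include <stdlib.h>

#define ERR_INJECT_DIR "/sys/devices/system/cpu/cpu0/err_inject/"

static int wr_attr(const char *name, unsigned long val)
{
        char fn[256];
        FILE *f;

        snprintf(fn, sizeof(fn), ERR_INJECT_DIR "%s", name);
        f = fopen(fn, "w");
        if (!f)
                return -1;
        fprintf(f, "%lx", val);        /* sysfs store expects hex, as in the sample */
        return fclose(f);
}

static int rd_attr(const char *name, unsigned long *val)
{
        char fn[256];
        FILE *f;

        snprintf(fn, sizeof(fn), ERR_INJECT_DIR "%s", name);
        f = fopen(fn, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%lx", val) != 1)
                *val = 0;
        return fclose(f);
}

int main(void)
{
        unsigned long status = 0, capabilities = 0;

        /* err_type_info = 0 selects query mode; err_struct_info unused here. */
        if (wr_attr("err_type_info", 0x0) < 0 ||
            wr_attr("err_struct_info", 0x0) < 0 ||
            wr_attr("call_start", 0x1) < 0) {   /* 1 = physical mode */
                perror(ERR_INJECT_DIR);
                return EXIT_FAILURE;
        }

        rd_attr("status", &status);
        rd_attr("capabilities", &capabilities);
        printf("status=0x%lx capabilities=0x%lx\n", status, capabilities);
        return 0;
}
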
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3b71f97d0b603a7a3b112e9ea1561eab06e6265f..e23af4b6ae8c173ee20b456805bd556ac86a83fb 100644 (file)
@@ -439,6 +439,16 @@ config IA64_PALINFO
          To use this option, you have to ensure that the "/proc file system
          support" (CONFIG_PROC_FS) is enabled, too.
 
+config IA64_MC_ERR_INJECT
+       tristate "MC error injection support"
+       help
+         Selects whether to support MC error injection. By enabling the
+         support, the kernel provides a sysfs interface for user
+         applications to call the MC error injection PAL procedure to
+         inject various errors. This is a useful tool for MCA testing.
+
+         If you're unsure, do not select this option.
+
 config SGI_SN
        def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
 
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 153bfdc0182d6edf2cf6f3738feacf14e4119bad..90bd9601cddef047062a594be3e682b31548fcce 100644 (file)
@@ -164,6 +164,7 @@ CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
+# CONFIG_IA64_MC_ERR_INJECT is not set
 CONFIG_SGI_SN=y
 # CONFIG_IA64_ESI is not set
 
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 098ee605bf5ee2f332f9a4ce4e1497ede19693d0..33e5a598672db019bd88422a8c05277ec0320fd3 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
 obj-$(CONFIG_AUDIT)            += audit.o
 obj-$(CONFIG_PCI_MSI)          += msi_ia64.o
 mca_recovery-y                 += mca_drv.o mca_drv_asm.o
+obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 
 obj-$(CONFIG_IA64_ESI)         += esi.o
 ifneq ($(CONFIG_IA64_ESI),)
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f45f91d38caba314092a9e87173036ca1318dbec..78d29b79947dc3223a1e9c47e7975b71a90b5862 100644 (file)
@@ -660,6 +660,29 @@ efi_memory_descriptor (unsigned long phys_addr)
        return NULL;
 }
 
+static int
+efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+       unsigned long end;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       end = phys_addr + size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (md->phys_addr < end && efi_md_end(md) > phys_addr)
+                       return 1;
+       }
+       return 0;
+}
+
 u32
 efi_mem_type (unsigned long phys_addr)
 {
@@ -766,11 +789,28 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
 int
 valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
 {
+       unsigned long phys_addr = pfn << PAGE_SHIFT;
+       u64 attr;
+
+       attr = efi_mem_attribute(phys_addr, size);
+
        /*
-        * MMIO regions are often missing from the EFI memory map.
-        * We must allow mmap of them for programs like X, so we
-        * currently can't do any useful validation.
+        * /dev/mem mmap uses normal user pages, so we don't need the entire
+        * granule, but the entire region we're mapping must support the same
+        * attribute.
         */
+       if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
+               return 1;
+
+       /*
+        * Intel firmware doesn't tell us about all the MMIO regions, so
+        * in general we have to allow mmap requests.  But if EFI *does*
+        * tell us about anything inside this region, we should deny it.
+        * The user can always map a smaller region to avoid the overlap.
+        */
+       if (efi_memmap_intersects(phys_addr, size))
+               return 0;
+
        return 1;
 }
 
index e7873eeae448c15ec1e6ecb7718974e67bce8786..55fd2d5471e127df16d9edb7a54b161bb4255767 100644 (file)
@@ -767,7 +767,7 @@ ENTRY(ia64_leave_syscall)
        ld8.fill r15=[r3]                       // M0|1 restore r15
        mov b6=r18                              // I0   restore b6
 
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
+       LOAD_PHYS_STACK_REG_SIZE(r17)
        mov f9=f0                                       // F    clear f9
 (pKStk) br.cond.dpnt.many skip_rbs_switch              // B
 
@@ -775,7 +775,6 @@ ENTRY(ia64_leave_syscall)
        shr.u r18=r19,16                // I0|1 get byte size of existing "dirty" partition
        cover                           // B    add current frame into dirty partition & set cr.ifs
        ;;
-(pUStk) ld4 r17=[r17]                  // M0|1 r17 = cpu_data->phys_stacked_size_p8
        mov r19=ar.bsp                  // M2   get new backing store pointer
        mov f10=f0                      // F    clear f10
 
@@ -953,9 +952,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        shr.u r18=r19,16        // get byte size of existing "dirty" partition
        ;;
        mov r16=ar.bsp          // get existing backing store pointer
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-       ;;
-       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
+       LOAD_PHYS_STACK_REG_SIZE(r17)
 (pKStk)        br.cond.dpnt skip_rbs_switch
 
        /*
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
new file mode 100644 (file)
index 0000000..d3e9f33
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * err_inject.c -
+ *     1.) Inject errors to a processor.
+ *     2.) Query error injection capabilities.
+ * This driver, together with user-space code, acts as an error
+ * injection tool.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Written by: Fenghua Yu <fenghua.yu@intel.com>, Intel Corporation
+ * Copyright (C) 2006, Intel Corp.  All rights reserved.
+ *
+ */
+#include <linux/sysdev.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#define ERR_INJ_DEBUG
+
+#define ERR_DATA_BUFFER_SIZE 3                 // Three 8-byte words
+
+#define define_one_ro(name)                                            \
+static SYSDEV_ATTR(name, 0444, show_##name, NULL)
+
+#define define_one_rw(name)                                            \
+static SYSDEV_ATTR(name, 0644, show_##name, store_##name)
+
+static u64 call_start[NR_CPUS];
+static u64 phys_addr[NR_CPUS];
+static u64 err_type_info[NR_CPUS];
+static u64 err_struct_info[NR_CPUS];
+static struct {
+       u64 data1;
+       u64 data2;
+       u64 data3;
+} __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
+static s64 status[NR_CPUS];
+static u64 capabilities[NR_CPUS];
+static u64 resources[NR_CPUS];
+
+#define show(name)                                                     \
+static ssize_t                                                                 \
+show_##name(struct sys_device *dev, char *buf)                         \
+{                                                                      \
+       u32 cpu=dev->id;                                                \
+       return sprintf(buf, "%lx\n", name[cpu]);                        \
+}
+
+#define store(name)                                                    \
+static ssize_t                                                                 \
+store_##name(struct sys_device *dev, const char *buf, size_t size)     \
+{                                                                      \
+       unsigned int cpu=dev->id;                                       \
+       name[cpu] = simple_strtoull(buf, NULL, 16);                     \
+       return size;                                                    \
+}
+
+show(call_start)
+
+/* It is the user's responsibility to call the PAL procedure on a specific
+ * processor.  The cpu number in the driver is only used for storing data.
+ */
+static ssize_t
+store_call_start(struct sys_device *dev, const char *buf, size_t size)
+{
+       unsigned int cpu=dev->id;
+       unsigned long call_start = simple_strtoull(buf, NULL, 16);
+
+#ifdef ERR_INJ_DEBUG
+       printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
+       printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
+       printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
+       printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+                         err_data_buffer[cpu].data1,
+                         err_data_buffer[cpu].data2,
+                         err_data_buffer[cpu].data3);
+#endif
+       switch (call_start) {
+           case 0: /* Do nothing. */
+               break;
+           case 1: /* Call pal_mc_error_inject in physical mode. */
+               status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu],
+                                       err_struct_info[cpu],
+                                       ia64_tpa(&err_data_buffer[cpu]),
+                                       &capabilities[cpu],
+                                       &resources[cpu]);
+               break;
+           case 2: /* Call pal_mc_error_inject in virtual mode. */
+               status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu],
+                                       err_struct_info[cpu],
+                                       ia64_tpa(&err_data_buffer[cpu]),
+                                       &capabilities[cpu],
+                                       &resources[cpu]);
+               break;
+           default:
+               status[cpu] = -EINVAL;
+               break;
+       }
+
+#ifdef ERR_INJ_DEBUG
+       printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
+       printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
+       printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+#endif
+       return size;
+}
+
+show(err_type_info)
+store(err_type_info)
+
+static ssize_t
+show_virtual_to_phys(struct sys_device *dev, char *buf)
+{
+       unsigned int cpu=dev->id;
+       return sprintf(buf, "%lx\n", phys_addr[cpu]);
+}
+
+static ssize_t
+store_virtual_to_phys(struct sys_device *dev, const char *buf, size_t size)
+{
+       unsigned int cpu=dev->id;
+       u64 virt_addr=simple_strtoull(buf, NULL, 16);
+       int ret;
+
+        ret = get_user_pages(current, current->mm, virt_addr,
+                        1, VM_READ, 0, NULL, NULL);
+       if (ret<=0) {
+#ifdef ERR_INJ_DEBUG
+               printk("Virtual address %lx does not exist.\n", virt_addr);
+#endif
+               return -EINVAL;
+       }
+
+       phys_addr[cpu] = ia64_tpa(virt_addr);
+       return size;
+}
+
+show(err_struct_info)
+store(err_struct_info)
+
+static ssize_t
+show_err_data_buffer(struct sys_device *dev, char *buf)
+{
+       unsigned int cpu=dev->id;
+
+       return sprintf(buf, "%lx, %lx, %lx\n",
+                       err_data_buffer[cpu].data1,
+                       err_data_buffer[cpu].data2,
+                       err_data_buffer[cpu].data3);
+}
+
+static ssize_t
+store_err_data_buffer(struct sys_device *dev, const char *buf, size_t size)
+{
+       unsigned int cpu=dev->id;
+       int ret;
+
+#ifdef ERR_INJ_DEBUG
+       printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+                err_data_buffer[cpu].data1,
+                err_data_buffer[cpu].data2,
+                err_data_buffer[cpu].data3,
+                cpu);
+#endif
+       ret=sscanf(buf, "%lx, %lx, %lx",
+                       &err_data_buffer[cpu].data1,
+                       &err_data_buffer[cpu].data2,
+                       &err_data_buffer[cpu].data3);
+       if (ret!=ERR_DATA_BUFFER_SIZE)
+               return -EINVAL;
+
+       return size;
+}
+
+show(status)
+show(capabilities)
+show(resources)
+
+define_one_rw(call_start);
+define_one_rw(err_type_info);
+define_one_rw(err_struct_info);
+define_one_rw(err_data_buffer);
+define_one_rw(virtual_to_phys);
+define_one_ro(status);
+define_one_ro(capabilities);
+define_one_ro(resources);
+
+static struct attribute *default_attrs[] = {
+       &attr_call_start.attr,
+       &attr_virtual_to_phys.attr,
+       &attr_err_type_info.attr,
+       &attr_err_struct_info.attr,
+       &attr_err_data_buffer.attr,
+       &attr_status.attr,
+       &attr_capabilities.attr,
+       &attr_resources.attr,
+       NULL
+};
+
+static struct attribute_group err_inject_attr_group = {
+       .attrs = default_attrs,
+       .name = "err_inject"
+};
+/* Add/Remove err_inject interface for CPU device */
+static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev)
+{
+       return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
+}
+
+static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev)
+{
+       sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
+       return 0;
+}
+static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+               unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+       struct sys_device *sys_dev;
+
+       sys_dev = get_cpu_sysdev(cpu);
+       switch (action) {
+       case CPU_ONLINE:
+               err_inject_add_dev(sys_dev);
+               break;
+       case CPU_DEAD:
+               err_inject_remove_dev(sys_dev);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+{
+       .notifier_call = err_inject_cpu_callback,
+};
+
+static int __init
+err_inject_init(void)
+{
+       int i;
+
+#ifdef ERR_INJ_DEBUG
+       printk(KERN_INFO "Enter error injection driver.\n");
+#endif
+       for_each_online_cpu(i) {
+               err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
+                               (void *)(long)i);
+       }
+
+       register_hotcpu_notifier(&err_inject_cpu_notifier);
+
+       return 0;
+}
+
+static void __exit
+err_inject_exit(void)
+{
+       int i;
+       struct sys_device *sys_dev;
+
+#ifdef ERR_INJ_DEBUG
+       printk(KERN_INFO "Exit error injection driver.\n");
+#endif
+       for_each_online_cpu(i) {
+               sys_dev = get_cpu_sysdev(i);
+               sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
+       }
+       unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>");
+MODULE_DESCRIPTION("MC error injection kernel sysfs interface");
+MODULE_LICENSE("GPL");
index 6b7fcbd3f6f1a9789ccdc755ee0a59122578fe78..34f44d8be00daa98e9eb918620632c6e89432e11 100644 (file)
@@ -374,6 +374,7 @@ ENTRY(alt_dtlb_miss)
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        mov r21=cr.ipsr
        mov r31=pr
+       mov r24=PERCPU_ADDR
        ;;
 #ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number into r21
@@ -386,22 +387,30 @@ ENTRY(alt_dtlb_miss)
 (p8)   mov r29=b0                              // save b0
 (p8)   br.cond.dptk dtlb_fault
 #endif
+       cmp.ge p10,p11=r16,r24                  // access to per_cpu_data?
+       tbit.z p12,p0=r16,61                    // access to region 6?
+       mov r25=PERCPU_PAGE_SHIFT << 2
+       mov r26=PERCPU_PAGE_SIZE
+       nop.m 0
+       nop.b 0
+       ;;
+(p10)  mov r19=IA64_KR(PER_CPU_DATA)
+(p11)  and r19=r19,r16                         // clear non-ppn fields
        extr.u r23=r21,IA64_PSR_CPL0_BIT,2      // extract psr.cpl
        and r22=IA64_ISR_CODE_MASK,r20          // get the isr.code field
        tbit.nz p6,p7=r20,IA64_ISR_SP_BIT       // is speculation bit on?
-       shr.u r18=r16,57                        // move address bit 61 to bit 4
-       and r19=r19,r16                         // clear ed, reserved bits, and PTE control bits
        tbit.nz p9,p0=r20,IA64_ISR_NA_BIT       // is non-access bit on?
        ;;
-       andcm r18=0x10,r18      // bit 4=~address-bit(61)
+(p10)  sub r19=r19,r26
+(p10)  mov cr.itir=r25
        cmp.ne p8,p0=r0,r23
 (p9)   cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22  // check isr.code field
+(p12)  dep r17=-1,r17,4,1                      // set ma=UC for region 6 addr
 (p8)   br.cond.spnt page_fault
 
        dep r21=-1,r21,IA64_PSR_ED_BIT,1
-       or r19=r19,r17          // insert PTE control bits into r19
        ;;
-       or r19=r19,r18          // set bit 4 (uncached) if the access was to region 6
+       or r19=r19,r17          // insert PTE control bits into r19
 (p6)   mov cr.ipsr=r21
        ;;
 (p7)   itc.d r19               // insert the TLB entry
index c6b607c00deea80c4268468412b60e7fff8b5128..8c9c26aa6ae092e130f93a4076025d17a297ab92 100644 (file)
@@ -101,14 +101,6 @@ ia64_do_tlb_purge:
        ;;
        srlz.d
        ;;
-       // 2. Purge DTR for PERCPU data.
-       movl r16=PERCPU_ADDR
-       mov r18=PERCPU_PAGE_SHIFT<<2
-       ;;
-       ptr.d r16,r18
-       ;;
-       srlz.d
-       ;;
        // 3. Purge ITR for PAL code.
        GET_THIS_PADDR(r2, ia64_mca_pal_base)
        ;;
@@ -196,22 +188,6 @@ ia64_reload_tr:
        srlz.i
        srlz.d
        ;;
-       // 2. Reload DTR register for PERCPU data.
-       GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
-       ;;
-       movl r16=PERCPU_ADDR            // vaddr
-       movl r18=PERCPU_PAGE_SHIFT<<2
-       ;;
-       mov cr.itir=r18
-       mov cr.ifa=r16
-       ;;
-       ld8 r18=[r2]                    // load per-CPU PTE
-       mov r16=IA64_TR_PERCPU_DATA;
-       ;;
-       itr.d dtr[r16]=r18
-       ;;
-       srlz.d
-       ;;
        // 3. Reload ITR for PAL code.
        GET_THIS_PADDR(r2, ia64_mca_pal_pte)
        ;;
index bc11bb096f584735fe8220c3f30c9063e9b4a8c2..e796e29f8e156df255c93f85fa5cd8f7a882dace 100644 (file)
@@ -195,3 +195,23 @@ ia64_patch_gate (void)
        ia64_patch_vtop(START(vtop), END(vtop));
        ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
 }
+
+void ia64_patch_phys_stack_reg(unsigned long val)
+{
+       s32 * offp = (s32 *) __start___phys_stack_reg_patchlist;
+       s32 * end = (s32 *) __end___phys_stack_reg_patchlist;
+       u64 ip, mask, imm;
+
+       /* see instruction format A4: adds r1 = imm13, r3 */
+       mask = (0x3fUL << 27) | (0x7f << 13);
+       imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;
+
+       while (offp < end) {
+               ip = (u64) offp + *offp;
+               ia64_patch(ip, mask, imm);
+               ia64_fc(ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
index dc7dd7648ec5fe7490af9fee94b20a2bce5d4690..6e19da122ae3a75952513aaa49166f511440c2ac 100644 (file)
@@ -75,7 +75,6 @@ extern void ia64_setup_printk_clock(void);
 
 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
-DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
 struct screen_info screen_info;
@@ -869,6 +868,7 @@ void __cpuinit
 cpu_init (void)
 {
        extern void __cpuinit ia64_mmu_init (void *);
+       static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
@@ -982,7 +982,10 @@ cpu_init (void)
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
-       __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
+       if (num_phys_stacked > max_num_phys_stacked) {
+               ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
+               max_num_phys_stacked = num_phys_stacked;
+       }
        platform_cpu_init();
        pm_idle = default_idle;
 }
index 25dd55e4db2482985e86563599be0f05ec74058b..69238264211876fe1f0d506f4cd0cf9c4a5b20a2 100644 (file)
@@ -78,6 +78,13 @@ SECTIONS
          __stop___mca_table = .;
        }
 
+  .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+       {
+         __start___phys_stack_reg_patchlist = .;
+         *(.data.patch.phys_stack_reg)
+         __end___phys_stack_reg_patchlist = .;
+       }
+
   /* Global data */
   _data = .;
 
index 2da841110727b223c4e9c4720a6351a94c92a7c1..cffb1e8325e8e226fdde5e996f33aad425bf6006 100644 (file)
@@ -355,7 +355,7 @@ setup_gate (void)
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
-       unsigned long psr, pta, impl_va_bits;
+       unsigned long pta, impl_va_bits;
        extern void __devinit tlb_init (void);
 
 #ifdef CONFIG_DISABLE_VHPT
@@ -364,15 +364,6 @@ ia64_mmu_init (void *my_cpu_data)
 #      define VHPT_ENABLE_BIT  1
 #endif
 
-       /* Pin mapping for percpu area into TLB */
-       psr = ia64_clear_ic();
-       ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-                pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
-                PERCPU_PAGE_SHIFT);
-
-       ia64_set_psr(psr);
-       ia64_srlz_i();
-
        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
index 4280c074d64e69e28badb70de4c9050f0524b986..2a140627dfd6eefb06178cee86b87ef2282c11b1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
  *     Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
 #include <asm/io.h>
 #include <asm/meminit.h>
 
 static inline void __iomem *
-__ioremap (unsigned long offset, unsigned long size)
+__ioremap (unsigned long phys_addr)
 {
-       return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+       return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
 }
 
 void __iomem *
-ioremap (unsigned long offset, unsigned long size)
+ioremap (unsigned long phys_addr, unsigned long size)
 {
+       void __iomem *addr;
+       struct vm_struct *area;
+       unsigned long offset;
+       pgprot_t prot;
        u64 attr;
        unsigned long gran_base, gran_size;
+       unsigned long page_base;
 
        /*
         * For things in kern_memmap, we must use the same attribute
         * as the rest of the kernel.  For more details, see
         * Documentation/ia64/aliasing.txt.
         */
-       attr = kern_mem_attribute(offset, size);
+       attr = kern_mem_attribute(phys_addr, size);
        if (attr & EFI_MEMORY_WB)
-               return (void __iomem *) phys_to_virt(offset);
+               return (void __iomem *) phys_to_virt(phys_addr);
        else if (attr & EFI_MEMORY_UC)
-               return __ioremap(offset, size);
+               return __ioremap(phys_addr);
 
        /*
         * Some chipsets don't support UC access to memory.  If
         * WB is supported for the whole granule, we prefer that.
         */
-       gran_base = GRANULEROUNDDOWN(offset);
-       gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+       gran_base = GRANULEROUNDDOWN(phys_addr);
+       gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
        if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-               return (void __iomem *) phys_to_virt(offset);
+               return (void __iomem *) phys_to_virt(phys_addr);
 
-       return __ioremap(offset, size);
+       /*
+        * WB is not supported for the whole granule, so we can't use
+        * the region 7 identity mapping.  If we can safely cover the
+        * area with kernel page table mappings, we can use those
+        * instead.
+        */
+       page_base = phys_addr & PAGE_MASK;
+       size = PAGE_ALIGN(phys_addr + size) - page_base;
+       if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
+               prot = PAGE_KERNEL;
+
+               /*
+                * Mappings have to be page-aligned
+                */
+               offset = phys_addr & ~PAGE_MASK;
+               phys_addr &= PAGE_MASK;
+
+               /*
+                * Ok, go for it..
+                */
+               area = get_vm_area(size, VM_IOREMAP);
+               if (!area)
+                       return NULL;
+
+               area->phys_addr = phys_addr;
+               addr = (void __iomem *) area->addr;
+               if (ioremap_page_range((unsigned long) addr,
+                               (unsigned long) addr + size, phys_addr, prot)) {
+                       vunmap((void __force *) addr);
+                       return NULL;
+               }
+
+               return (void __iomem *) (offset + (char __iomem *)addr);
+       }
+
+       return __ioremap(phys_addr);
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
-ioremap_nocache (unsigned long offset, unsigned long size)
+ioremap_nocache (unsigned long phys_addr, unsigned long size)
 {
-       if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+       if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
                return NULL;
 
-       return __ioremap(offset, size);
+       return __ioremap(phys_addr);
 }
 EXPORT_SYMBOL(ioremap_nocache);
+
+void
+iounmap (volatile void __iomem *addr)
+{
+       if (REGION_NUMBER(addr) == RGN_GATE)
+               vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
index 0e83f3b419b5597e8f56899cb238a121a071ac5e..9f635896d2527b86a1a84702dfa8030369511afc 100644 (file)
@@ -659,8 +659,6 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
                return -EINVAL;
        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);
-       if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
-               return -EINVAL;
 
        addr = pci_get_legacy_mem(bus);
        if (IS_ERR(addr))
index c22b4658fc6120144753962bd667c10c9a646f8c..c1642fd640297b0796dec52172b80d8e0652f7dc 100644 (file)
@@ -103,6 +103,16 @@ name:
 # define FSYS_RETURN   br.ret.sptk.many b6
 #endif
 
+/*
+ * If the physical stack register size differs from IA64_NUM_PHYS_STACK_REG,
+ * dynamically patch the kernel for correct size.
+ */
+       .section ".data.patch.phys_stack_reg", "a"
+       .previous
+#define LOAD_PHYS_STACK_REG_SIZE(reg)                  \
+[1:]   adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;        \
+       .xdata4 ".data.patch.phys_stack_reg", 1b-.
+
 /*
  * Up until early 2004, use of .align within a function caused bad unwind info.
  * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
index 6311e168cd34319389161c044b52cf69fa74f3be..eb17a86929679834eaf5372b01a57ab5fd84edb0 100644 (file)
@@ -421,11 +421,7 @@ __writeq (unsigned long val, volatile void __iomem *addr)
 
 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
-
-static inline void
-iounmap (volatile void __iomem *addr)
-{
-}
+extern void iounmap (volatile void __iomem *addr);
 
 /* Use normal IO mappings for DMI */
 #define dmi_ioremap ioremap
index 221b5cb564b253bc0e73c8d4aa75d5ba859d3c7d..7e55a584975c13d077d459c3612f328ab4356130 100644 (file)
@@ -29,8 +29,7 @@
  */
 #define IA64_TR_KERNEL         0       /* itr0, dtr0: maps kernel image (code & data) */
 #define IA64_TR_PALCODE                1       /* itr1: maps PALcode as required by EFI */
-#define IA64_TR_PERCPU_DATA    1       /* dtr1: percpu data */
-#define IA64_TR_CURRENT_STACK  2       /* dtr2: maps kernel's memory- & register-stacks */
+#define IA64_TR_CURRENT_STACK  1       /* dtr1: maps kernel's memory- & register-stacks */
 
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT                1
index 67656ce767c22d1f06c78f6e06d0ac1528fd583d..abfcb3a2588fb7de4e98190fd95723d2de526e55 100644 (file)
@@ -89,6 +89,8 @@
 #define PAL_GET_PSTATE_TYPE_AVGNORESET 2
 #define PAL_GET_PSTATE_TYPE_INSTANT    3
 
+#define PAL_MC_ERROR_INJECT    276     /* Injects processor error or returns injection capabilities */
+
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
@@ -1235,6 +1237,37 @@ ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_in
        return iprv.status;
 }
 
+/* Injects the requested processor error or returns info on
+ * supported injection capabilities for the current processor implementation.
+ */
+static inline s64
+ia64_pal_mc_error_inject_phys (u64 err_type_info, u64 err_struct_info,
+                       u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+                         err_struct_info, err_data_buffer);
+       if (capabilities)
+               *capabilities= iprv.v0;
+       if (resources)
+               *resources= iprv.v1;
+       return iprv.status;
+}
+
+static inline s64
+ia64_pal_mc_error_inject_virt (u64 err_type_info, u64 err_struct_info,
+                       u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+                         err_struct_info, err_data_buffer);
+       if (capabilities)
+               *capabilities= iprv.v0;
+       if (resources)
+               *resources= iprv.v1;
+       return iprv.status;
+}
+
 /* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
  * attempt to correct any expected machine checks.
  */
index 4797f3535e6ddcfa5f555039289ae7def18e0787..a71543084fb41dc51cd90b168824a6a5a4b5eb26 100644 (file)
@@ -20,6 +20,7 @@ extern void ia64_patch_imm60 (u64 insn_addr, u64 val);                /* patch "brl" w/ip-rel
 
 extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
 extern void ia64_patch_vtop (unsigned long start, unsigned long end);
+extern void ia64_patch_phys_stack_reg(unsigned long val);
 extern void ia64_patch_gate (void);
 
 #endif /* _ASM_IA64_PATCH_H */
index 4f4ee1c2db2fb8177b12a589fb9b1a512915505e..db81ba406ceff8b1cb4bd7226741cbdbb2413eb8 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
 
+#define IA64_NUM_PHYS_STACK_REG        96
 #define IA64_NUM_DBG_REGS      8
 
 #define DEFAULT_MAP_BASE       __IA64_UL_CONST(0x2000000000000000)
index e9eb7f62d32bb259f0ed8a52b836d8fa677e6b02..dc42a359894fe759617fdf47b5e6a048eda8190b 100644 (file)
@@ -11,6 +11,7 @@
 extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
 extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
 extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
+extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
 extern char __start_gate_section[];
 extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
 extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];