/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licensing details see kernel-base/COPYING
 */
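/*
 * Overview: for every CPU that has MSRs, a debugfs tree is built under
 * arch_debugfs_dir (typically /sys/kernel/debug/x86 when debugfs is
 * mounted in the usual place), roughly:
 *
 *   x86/cpu/cpu<N>/<group>/<group>          group-wide register dump
 *   x86/cpu/cpu<N>/<group>/0x<msr>/value    single MSR, read/write
 *
 * where <group> is one of the cpu_base[] names below that is valid for
 * the CPU model.  See cpu_init_cpu() and cpu_init_allreg() for details.
 */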

#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/cpu_debug.h>
#include <asm/paravirt.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/desc.h>

static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_PER_CPU(unsigned, cpu_model);

static DEFINE_MUTEX(cpu_debug_lock);

static struct dentry *cpu_debugfs_dir;

static struct cpu_debug_base cpu_base[] = {
        { "mc",         CPU_MC,         0       },
        { "monitor",    CPU_MONITOR,    0       },
        { "time",       CPU_TIME,       0       },
        { "pmc",        CPU_PMC,        1       },
        { "platform",   CPU_PLATFORM,   0       },
        { "apic",       CPU_APIC,       0       },
        { "poweron",    CPU_POWERON,    0       },
        { "control",    CPU_CONTROL,    0       },
        { "features",   CPU_FEATURES,   0       },
        { "lastbranch", CPU_LBRANCH,    0       },
        { "bios",       CPU_BIOS,       0       },
        { "freq",       CPU_FREQ,       0       },
        { "mtrr",       CPU_MTRR,       0       },
        { "perf",       CPU_PERF,       0       },
        { "cache",      CPU_CACHE,      0       },
        { "sysenter",   CPU_SYSENTER,   0       },
        { "therm",      CPU_THERM,      0       },
        { "misc",       CPU_MISC,       0       },
        { "debug",      CPU_DEBUG,      0       },
        { "pat",        CPU_PAT,        0       },
        { "vmx",        CPU_VMX,        0       },
        { "call",       CPU_CALL,       0       },
        { "base",       CPU_BASE,       0       },
        { "smm",        CPU_SMM,        0       },
        { "svm",        CPU_SVM,        0       },
        { "osvm",       CPU_OSVM,       0       },
        { "tss",        CPU_TSS,        0       },
        { "cr",         CPU_CR,         0       },
        { "dt",         CPU_DT,         0       },
        { "registers",  CPU_REG_ALL,    0       },
};

static struct cpu_file_base cpu_file[] = {
        { "index",      CPU_REG_ALL,    0       },
        { "value",      CPU_REG_ALL,    1       },
};
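
/*
 * How the two file types are rendered (see print_cpu_data() and
 * cpu_write() below): a "value" file maps to a single MSR and prints its
 * raw 64-bit contents as one hex number, while a group-wide dump prints
 * one "<msr>: <high>_<low>" line per readable register.  The write flags
 * above gate which files accept writes at all.
 */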

/* Intel Registers Range */
static struct cpu_debug_range cpu_intel_range[] = {
        { 0x00000000, 0x00000001, CPU_MC,       CPU_INTEL_ALL           },
        { 0x00000006, 0x00000007, CPU_MONITOR,  CPU_CX_AT_XE            },
        { 0x00000010, 0x00000010, CPU_TIME,     CPU_INTEL_ALL           },
        { 0x00000011, 0x00000013, CPU_PMC,      CPU_INTEL_PENTIUM       },
        { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE         },
        { 0x0000001B, 0x0000001B, CPU_APIC,     CPU_P6_CX_AT_XE         },

        { 0x0000002A, 0x0000002A, CPU_POWERON,  CPU_PX_CX_AT_XE         },
        { 0x0000002B, 0x0000002B, CPU_POWERON,  CPU_INTEL_XEON          },
        { 0x0000002C, 0x0000002C, CPU_FREQ,     CPU_INTEL_XEON          },
        { 0x0000003A, 0x0000003A, CPU_CONTROL,  CPU_CX_AT_XE            },

        { 0x00000040, 0x00000043, CPU_LBRANCH,  CPU_PM_CX_AT_XE         },
        { 0x00000044, 0x00000047, CPU_LBRANCH,  CPU_PM_CO_AT            },
        { 0x00000060, 0x00000063, CPU_LBRANCH,  CPU_C2_AT               },
        { 0x00000064, 0x00000067, CPU_LBRANCH,  CPU_INTEL_ATOM          },

        { 0x00000079, 0x00000079, CPU_BIOS,     CPU_P6_CX_AT_XE         },
        { 0x00000088, 0x0000008A, CPU_CACHE,    CPU_INTEL_P6            },
        { 0x0000008B, 0x0000008B, CPU_BIOS,     CPU_P6_CX_AT_XE         },
        { 0x0000009B, 0x0000009B, CPU_MONITOR,  CPU_INTEL_XEON          },

        { 0x000000C1, 0x000000C2, CPU_PMC,      CPU_P6_CX_AT            },
        { 0x000000CD, 0x000000CD, CPU_FREQ,     CPU_CX_AT               },
        { 0x000000E7, 0x000000E8, CPU_PERF,     CPU_CX_AT               },
        { 0x000000FE, 0x000000FE, CPU_MTRR,     CPU_P6_CX_XE            },

        { 0x00000116, 0x00000116, CPU_CACHE,    CPU_INTEL_P6            },
        { 0x00000118, 0x00000118, CPU_CACHE,    CPU_INTEL_P6            },
        { 0x00000119, 0x00000119, CPU_CACHE,    CPU_INTEL_PX            },
        { 0x0000011A, 0x0000011B, CPU_CACHE,    CPU_INTEL_P6            },
        { 0x0000011E, 0x0000011E, CPU_CACHE,    CPU_PX_CX_AT            },

        { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE         },
        { 0x00000179, 0x0000017A, CPU_MC,       CPU_PX_CX_AT_XE         },
        { 0x0000017B, 0x0000017B, CPU_MC,       CPU_P6_XE               },
        { 0x00000186, 0x00000187, CPU_PMC,      CPU_P6_CX_AT            },
        { 0x00000198, 0x00000199, CPU_PERF,     CPU_PM_CX_AT_XE         },
        { 0x0000019A, 0x0000019A, CPU_TIME,     CPU_PM_CX_AT_XE         },
        { 0x0000019B, 0x0000019D, CPU_THERM,    CPU_PM_CX_AT_XE         },
        { 0x000001A0, 0x000001A0, CPU_MISC,     CPU_PM_CX_AT_XE         },

        { 0x000001C9, 0x000001C9, CPU_LBRANCH,  CPU_PM_CX_AT            },
        { 0x000001D7, 0x000001D8, CPU_LBRANCH,  CPU_INTEL_XEON          },
        { 0x000001D9, 0x000001D9, CPU_DEBUG,    CPU_CX_AT_XE            },
        { 0x000001DA, 0x000001DA, CPU_LBRANCH,  CPU_INTEL_XEON          },
        { 0x000001DB, 0x000001DB, CPU_LBRANCH,  CPU_P6_XE               },
        { 0x000001DC, 0x000001DC, CPU_LBRANCH,  CPU_INTEL_P6            },
        { 0x000001DD, 0x000001DE, CPU_LBRANCH,  CPU_PX_CX_AT_XE         },
        { 0x000001E0, 0x000001E0, CPU_LBRANCH,  CPU_INTEL_P6            },

        { 0x00000200, 0x0000020F, CPU_MTRR,     CPU_P6_CX_XE            },
        { 0x00000250, 0x00000250, CPU_MTRR,     CPU_P6_CX_XE            },
        { 0x00000258, 0x00000259, CPU_MTRR,     CPU_P6_CX_XE            },
        { 0x00000268, 0x0000026F, CPU_MTRR,     CPU_P6_CX_XE            },
        { 0x00000277, 0x00000277, CPU_PAT,      CPU_C2_AT_XE            },
        { 0x000002FF, 0x000002FF, CPU_MTRR,     CPU_P6_CX_XE            },

        { 0x00000300, 0x00000308, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x00000309, 0x0000030B, CPU_PMC,      CPU_C2_AT_XE            },
        { 0x0000030C, 0x00000311, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x00000345, 0x00000345, CPU_PMC,      CPU_C2_AT               },
        { 0x00000360, 0x00000371, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x0000038D, 0x00000390, CPU_PMC,      CPU_C2_AT               },
        { 0x000003A0, 0x000003BE, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x000003C0, 0x000003CD, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x000003E0, 0x000003E1, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x000003F0, 0x000003F0, CPU_PMC,      CPU_INTEL_XEON          },
        { 0x000003F1, 0x000003F1, CPU_PMC,      CPU_C2_AT_XE            },
        { 0x000003F2, 0x000003F2, CPU_PMC,      CPU_INTEL_XEON          },

        { 0x00000400, 0x00000402, CPU_MC,       CPU_PM_CX_AT_XE         },
        { 0x00000403, 0x00000403, CPU_MC,       CPU_INTEL_XEON          },
        { 0x00000404, 0x00000406, CPU_MC,       CPU_PM_CX_AT_XE         },
        { 0x00000407, 0x00000407, CPU_MC,       CPU_INTEL_XEON          },
        { 0x00000408, 0x0000040A, CPU_MC,       CPU_PM_CX_AT_XE         },
        { 0x0000040B, 0x0000040B, CPU_MC,       CPU_INTEL_XEON          },
        { 0x0000040C, 0x0000040E, CPU_MC,       CPU_PM_CX_XE            },
        { 0x0000040F, 0x0000040F, CPU_MC,       CPU_INTEL_XEON          },
        { 0x00000410, 0x00000412, CPU_MC,       CPU_PM_CX_AT_XE         },
        { 0x00000413, 0x00000417, CPU_MC,       CPU_CX_AT_XE            },
        { 0x00000480, 0x0000048B, CPU_VMX,      CPU_CX_AT_XE            },

        { 0x00000600, 0x00000600, CPU_DEBUG,    CPU_PM_CX_AT_XE         },
        { 0x00000680, 0x0000068F, CPU_LBRANCH,  CPU_INTEL_XEON          },
        { 0x000006C0, 0x000006CF, CPU_LBRANCH,  CPU_INTEL_XEON          },

        { 0x000107CC, 0x000107D3, CPU_PMC,      CPU_INTEL_XEON_MP       },

        { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON          },
        { 0xC0000081, 0xC0000082, CPU_CALL,     CPU_INTEL_XEON          },
        { 0xC0000084, 0xC0000084, CPU_CALL,     CPU_INTEL_XEON          },
        { 0xC0000100, 0xC0000102, CPU_BASE,     CPU_INTEL_XEON          },
};

/* AMD Registers Range */
static struct cpu_debug_range cpu_amd_range[] = {
        { 0x00000010, 0x00000010, CPU_TIME,     CPU_ALL,                },
        { 0x0000001B, 0x0000001B, CPU_APIC,     CPU_ALL,                },
        { 0x000000FE, 0x000000FE, CPU_MTRR,     CPU_ALL,                },

        { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL,                },
        { 0x00000179, 0x0000017A, CPU_MC,       CPU_ALL,                },
        { 0x0000017B, 0x0000017B, CPU_MC,       CPU_ALL,                },
        { 0x000001D9, 0x000001D9, CPU_DEBUG,    CPU_ALL,                },
        { 0x000001DB, 0x000001DE, CPU_LBRANCH,  CPU_ALL,                },

        { 0x00000200, 0x0000020F, CPU_MTRR,     CPU_ALL,                },
        { 0x00000250, 0x00000250, CPU_MTRR,     CPU_ALL,                },
        { 0x00000258, 0x00000259, CPU_MTRR,     CPU_ALL,                },
        { 0x00000268, 0x0000026F, CPU_MTRR,     CPU_ALL,                },
        { 0x00000277, 0x00000277, CPU_PAT,      CPU_ALL,                },
        { 0x000002FF, 0x000002FF, CPU_MTRR,     CPU_ALL,                },

        { 0x00000400, 0x00000417, CPU_MC,       CPU_ALL,                },

        { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL,                },
        { 0xC0000081, 0xC0000084, CPU_CALL,     CPU_ALL,                },
        { 0xC0000100, 0xC0000102, CPU_BASE,     CPU_ALL,                },
        { 0xC0000103, 0xC0000103, CPU_TIME,     CPU_ALL,                },

        { 0xC0000408, 0xC000040A, CPU_MC,       CPU_ALL,                },

        { 0xc0010000, 0xc0010007, CPU_PMC,      CPU_ALL,                },
        { 0xc0010010, 0xc0010010, CPU_MTRR,     CPU_ALL,                },
        { 0xc0010016, 0xc001001A, CPU_MTRR,     CPU_ALL,                },
        { 0xc001001D, 0xc001001D, CPU_MTRR,     CPU_ALL,                },
        { 0xc0010030, 0xc0010035, CPU_BIOS,     CPU_ALL,                },
        { 0xc0010056, 0xc0010056, CPU_SMM,      CPU_ALL,                },
        { 0xc0010061, 0xc0010063, CPU_SMM,      CPU_ALL,                },
        { 0xc0010074, 0xc0010074, CPU_MC,       CPU_ALL,                },
        { 0xc0010111, 0xc0010113, CPU_SMM,      CPU_ALL,                },
        { 0xc0010114, 0xc0010118, CPU_SVM,      CPU_ALL,                },
        { 0xc0010119, 0xc001011A, CPU_SMM,      CPU_ALL,                },
        { 0xc0010140, 0xc0010141, CPU_OSVM,     CPU_ALL,                },
        { 0xc0010156, 0xc0010156, CPU_SMM,      CPU_ALL,                },
};
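
/*
 * Each entry above describes an inclusive MSR range [min, max], the
 * register group it belongs to and, for Intel, a model mask that is
 * matched against the per-CPU modelflag computed below; the AMD table
 * simply uses CPU_ALL.  get_cpu_range() walks these tables entry by
 * entry.
 */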


static int get_cpu_modelflag(unsigned cpu)
{
        int flag;

        switch (per_cpu(cpu_model, cpu)) {
        /* Intel */
        case 0x0501:
        case 0x0502:
        case 0x0504:
                flag = CPU_INTEL_PENTIUM;
                break;
        case 0x0601:
        case 0x0603:
        case 0x0605:
        case 0x0607:
        case 0x0608:
        case 0x060A:
        case 0x060B:
                flag = CPU_INTEL_P6;
                break;
        case 0x0609:
        case 0x060D:
                flag = CPU_INTEL_PENTIUM_M;
                break;
        case 0x060E:
                flag = CPU_INTEL_CORE;
                break;
        case 0x060F:
        case 0x0617:
                flag = CPU_INTEL_CORE2;
                break;
        case 0x061C:
                flag = CPU_INTEL_ATOM;
                break;
        case 0x0F00:
        case 0x0F01:
        case 0x0F02:
        case 0x0F03:
        case 0x0F04:
                flag = CPU_INTEL_XEON_P4;
                break;
        case 0x0F06:
                flag = CPU_INTEL_XEON_MP;
                break;
        default:
                flag = CPU_NONE;
                break;
        }

        return flag;
}

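/*
 * cpu_model is packed in cpu_init_cpu() as
 *
 *   (x86_vendor << 16) | (family << 8) | model
 *
 * so ">> 16" below recovers the vendor.  The switch on the whole value
 * in get_cpu_modelflag() above works for Intel parts because
 * X86_VENDOR_INTEL is 0; 0x060F, for instance, is family 6, model 0x0F
 * (Core2).
 */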
static int get_cpu_range_count(unsigned cpu)
{
        int index;

        switch (per_cpu(cpu_model, cpu) >> 16) {
        case X86_VENDOR_INTEL:
                index = ARRAY_SIZE(cpu_intel_range);
                break;
        case X86_VENDOR_AMD:
                index = ARRAY_SIZE(cpu_amd_range);
                break;
        default:
                index = 0;
                break;
        }

        return index;
}

static int is_typeflag_valid(unsigned cpu, unsigned flag)
{
        unsigned vendor, modelflag;
        int i, index;

        /* Standard registers (CPU_TSS and above) are always valid */
        if (flag >= CPU_TSS)
                return 1;

        modelflag = per_cpu(cpu_modelflag, cpu);
        vendor = per_cpu(cpu_model, cpu) >> 16;
        index = get_cpu_range_count(cpu);

        for (i = 0; i < index; i++) {
                switch (vendor) {
                case X86_VENDOR_INTEL:
                        if ((cpu_intel_range[i].model & modelflag) &&
                            (cpu_intel_range[i].flag & flag))
                                return 1;
                        break;
                case X86_VENDOR_AMD:
                        if (cpu_amd_range[i].flag & flag)
                                return 1;
                        break;
                }
        }

        /* Invalid */
        return 0;
}

static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
                              int index, unsigned flag)
{
        unsigned modelflag;

        modelflag = per_cpu(cpu_modelflag, cpu);
        *max = 0;
        switch (per_cpu(cpu_model, cpu) >> 16) {
        case X86_VENDOR_INTEL:
                if ((cpu_intel_range[index].model & modelflag) &&
                    (cpu_intel_range[index].flag & flag)) {
                        *min = cpu_intel_range[index].min;
                        *max = cpu_intel_range[index].max;
                }
                break;
        case X86_VENDOR_AMD:
                if (cpu_amd_range[index].flag & flag) {
                        *min = cpu_amd_range[index].min;
                        *max = cpu_amd_range[index].max;
                }
                break;
        }

        return *max;
}

/* This function can also be called with seq = NULL for printk */
static void print_cpu_data(struct seq_file *seq, unsigned type,
                           u32 low, u32 high)
{
        struct cpu_private *priv;
        u64 val = high;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        val = (val << 32) | low;
                        seq_printf(seq, "0x%llx\n", val);
                } else
                        seq_printf(seq, " %08x: %08x_%08x\n",
                                   type, high, low);
        } else
                printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
}

/* This function can also be called with seq = NULL for printk */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
        unsigned msr, msr_min, msr_max;
        struct cpu_private *priv;
        u32 low, high;
        int i, range;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
                                               &low, &high))
                                print_cpu_data(seq, priv->reg, low, high);
                        return;
                }
        }

        range = get_cpu_range_count(cpu);

        for (i = 0; i < range; i++) {
                if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
                        continue;

                for (msr = msr_min; msr <= msr_max; msr++) {
                        if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
                                continue;
                        print_cpu_data(seq, msr, low, high);
                }
        }
}

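/*
 * Despite the "tss" name this does not dump the hardware TSS; it prints
 * the saved pt_regs of whatever task is current on the target CPU when
 * the IPI from cpu_seq_show() arrives, plus the live segment registers.
 */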
static void print_tss(void *arg)
{
        struct pt_regs *regs = task_pt_regs(current);
        struct seq_file *seq = arg;
        unsigned int seg;

        seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
        seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
        seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
        seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

        seq_printf(seq, " RSI\t: %016lx\n", regs->si);
        seq_printf(seq, " RDI\t: %016lx\n", regs->di);
        seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
        seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
        seq_printf(seq, " R08\t: %016lx\n", regs->r8);
        seq_printf(seq, " R09\t: %016lx\n", regs->r9);
        seq_printf(seq, " R10\t: %016lx\n", regs->r10);
        seq_printf(seq, " R11\t: %016lx\n", regs->r11);
        seq_printf(seq, " R12\t: %016lx\n", regs->r12);
        seq_printf(seq, " R13\t: %016lx\n", regs->r13);
        seq_printf(seq, " R14\t: %016lx\n", regs->r14);
        seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

        asm("movl %%cs,%0" : "=r" (seg));
        seq_printf(seq, " CS\t:             %04x\n", seg);
        asm("movl %%ds,%0" : "=r" (seg));
        seq_printf(seq, " DS\t:             %04x\n", seg);
        seq_printf(seq, " SS\t:             %04lx\n", regs->ss & 0xffff);
        asm("movl %%es,%0" : "=r" (seg));
        seq_printf(seq, " ES\t:             %04x\n", seg);
        asm("movl %%fs,%0" : "=r" (seg));
        seq_printf(seq, " FS\t:             %04x\n", seg);
        asm("movl %%gs,%0" : "=r" (seg));
        seq_printf(seq, " GS\t:             %04x\n", seg);

        seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);

        seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}

static void print_cr(void *arg)
{
        struct seq_file *seq = arg;

        seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
        seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
        seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
        seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
        seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}

static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
        seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}

static void print_dt(void *seq)
{
        struct desc_ptr dt;
        unsigned long ldt;

        /* IDT */
        store_idt((struct desc_ptr *)&dt);
        print_desc_ptr("IDT", seq, dt);

        /* GDT */
        store_gdt((struct desc_ptr *)&dt);
        print_desc_ptr("GDT", seq, dt);

        /* LDT */
        store_ldt(ldt);
        seq_printf(seq, " LDT\t: %016lx\n", ldt);

        /* TR */
        store_tr(ldt);
        seq_printf(seq, " TR\t: %016lx\n", ldt);
}

static void print_dr(void *arg)
{
        struct seq_file *seq = arg;
        unsigned long dr;
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5 */
                if ((i == 4) || (i == 5))
                        continue;
                get_debugreg(dr, i);
                seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
        }

        seq_printf(seq, "\n MSR\t:\n");
}

static void print_apic(void *arg)
{
        struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(seq, " LAPIC\t:\n");
        seq_printf(seq, " ID\t\t: %08x\n",  apic_read(APIC_ID) >> 24);
        seq_printf(seq, " LVR\t\t: %08x\n",  apic_read(APIC_LVR));
        seq_printf(seq, " TASKPRI\t: %08x\n",  apic_read(APIC_TASKPRI));
        seq_printf(seq, " ARBPRI\t\t: %08x\n",  apic_read(APIC_ARBPRI));
        seq_printf(seq, " PROCPRI\t: %08x\n",  apic_read(APIC_PROCPRI));
        seq_printf(seq, " LDR\t\t: %08x\n",  apic_read(APIC_LDR));
        seq_printf(seq, " DFR\t\t: %08x\n",  apic_read(APIC_DFR));
        seq_printf(seq, " SPIV\t\t: %08x\n",  apic_read(APIC_SPIV));
        seq_printf(seq, " ISR\t\t: %08x\n",  apic_read(APIC_ISR));
        seq_printf(seq, " ESR\t\t: %08x\n",  apic_read(APIC_ESR));
        seq_printf(seq, " ICR\t\t: %08x\n",  apic_read(APIC_ICR));
        seq_printf(seq, " ICR2\t\t: %08x\n",  apic_read(APIC_ICR2));
        seq_printf(seq, " LVTT\t\t: %08x\n",  apic_read(APIC_LVTT));
        seq_printf(seq, " LVTTHMR\t: %08x\n",  apic_read(APIC_LVTTHMR));
        seq_printf(seq, " LVTPC\t\t: %08x\n",  apic_read(APIC_LVTPC));
        seq_printf(seq, " LVT0\t\t: %08x\n",  apic_read(APIC_LVT0));
        seq_printf(seq, " LVT1\t\t: %08x\n",  apic_read(APIC_LVT1));
        seq_printf(seq, " LVTERR\t\t: %08x\n",  apic_read(APIC_LVTERR));
        seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
        seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
        seq_printf(seq, " TDCR\t\t: %08x\n",  apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */

        seq_printf(seq, "\n MSR\t:\n");
}

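/*
 * seq_file show callback.  Dumps that must run on the target CPU (TSS,
 * control, descriptor-table, debug and APIC registers) are bounced there
 * via smp_call_function_single() with wait == 1, so the remote printers
 * above run synchronously; MSR contents are fetched with
 * rdmsr_safe_on_cpu() from print_msr().
 */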
static int cpu_seq_show(struct seq_file *seq, void *v)
{
        struct cpu_private *priv = seq->private;

        if (priv == NULL)
                return -EINVAL;

        switch (cpu_base[priv->type].flag) {
        case CPU_TSS:
                smp_call_function_single(priv->cpu, print_tss, seq, 1);
                break;
        case CPU_CR:
                smp_call_function_single(priv->cpu, print_cr, seq, 1);
                break;
        case CPU_DT:
                smp_call_function_single(priv->cpu, print_dt, seq, 1);
                break;
        case CPU_DEBUG:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_dr, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        case CPU_APIC:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_apic, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;

        default:
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        }
        seq_printf(seq, "\n");

        return 0;
}

static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) /* One time is enough ;-) */
                return seq;

        return NULL;
}

static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;

        return cpu_seq_start(seq, pos);
}

static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations cpu_seq_ops = {
        .start          = cpu_seq_start,
        .next           = cpu_seq_next,
        .stop           = cpu_seq_stop,
        .show           = cpu_seq_show,
};

static int cpu_seq_open(struct inode *inode, struct file *file)
{
        struct cpu_private *priv = inode->i_private;
        struct seq_file *seq;
        int err;

        err = seq_open(file, &cpu_seq_ops);
        if (!err) {
                seq = file->private_data;
                seq->private = priv;
        }

        return err;
}

static int write_msr(struct cpu_private *priv, u64 val)
{
        u32 low, high;

        high = (val >> 32) & 0xffffffff;
        low = val & 0xffffffff;

        if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
                return 0;

        return -EPERM;
}

static int write_cpu_register(struct cpu_private *priv, const char *buf)
{
        int ret = -EPERM;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;

        /* Supporting only MSRs */
        if (priv->type < CPU_TSS_BIT)
                return write_msr(priv, val);

        return ret;
}

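/*
 * Write path.  A write is honoured only when both the register group and
 * the file type are flagged writable in cpu_base[]/cpu_file[], which
 * with the tables above means the per-MSR "value" files of the "pmc"
 * group.  A minimal session might look like this (hypothetical paths,
 * assuming debugfs is mounted at /sys/kernel/debug and the MSR exists on
 * cpu0):
 *
 *   cat /sys/kernel/debug/x86/cpu/cpu0/pmc/0xc1/value
 *   echo 0x0 > /sys/kernel/debug/x86/cpu/cpu0/pmc/0xc1/value
 */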
static ssize_t cpu_write(struct file *file, const char __user *ubuf,
                             size_t count, loff_t *off)
{
        struct seq_file *seq = file->private_data;
        struct cpu_private *priv = seq->private;
        char buf[19];

        if ((priv == NULL) || (count >= sizeof(buf)))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
                if (!write_cpu_register(priv, buf))
                        return count;

        return -EACCES;
}

static const struct file_operations cpu_fops = {
        .owner          = THIS_MODULE,
        .open           = cpu_seq_open,
        .read           = seq_read,
        .write          = cpu_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                           unsigned file, struct dentry *dentry)
{
        struct cpu_private *priv = NULL;

        /* Already initialized */
        if (file == CPU_INDEX_BIT)
                if (per_cpu(cpu_arr[type].init, cpu))
                        return 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;

        priv->cpu = cpu;
        priv->type = type;
        priv->reg = reg;
        priv->file = file;
        mutex_lock(&cpu_debug_lock);
        per_cpu(priv_arr[type], cpu) = priv;
        per_cpu(cpu_priv_count, cpu)++;
        mutex_unlock(&cpu_debug_lock);

        if (file)
                debugfs_create_file(cpu_file[file].name, S_IRUGO,
                                    dentry, (void *)priv, &cpu_fops);
        else {
                debugfs_create_file(cpu_base[type].name, S_IRUGO,
                                    per_cpu(cpu_arr[type].dentry, cpu),
                                    (void *)priv, &cpu_fops);
                mutex_lock(&cpu_debug_lock);
                per_cpu(cpu_arr[type].init, cpu) = 1;
                mutex_unlock(&cpu_debug_lock);
        }

        return 0;
}

static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
                             struct dentry *dentry)
{
        unsigned file;
        int err = 0;

        for (file = 0; file <  ARRAY_SIZE(cpu_file); file++) {
                err = cpu_create_file(cpu, type, reg, file, dentry);
                if (err)
                        return err;
        }

        return err;
}

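/*
 * For a single MSR-based register group: probe each MSR in the matching
 * ranges with rdmsr_safe_on_cpu() and create a "0x<msr>" directory (plus
 * its register files) only for MSRs that can be read without faulting.
 */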
static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned reg, reg_min, reg_max;
        int i, range, err = 0;
        char reg_dir[12];
        u32 low, high;

        range = get_cpu_range_count(cpu);

        for (i = 0; i < range; i++) {
                if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
                                   cpu_base[type].flag))
                        continue;

                for (reg = reg_min; reg <= reg_max; reg++) {
                        if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
                                continue;

                        sprintf(reg_dir, "0x%x", reg);
                        cpu_dentry = debugfs_create_dir(reg_dir, dentry);
                        err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
                        if (err)
                                return err;
                }
        }

        return err;
}

static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned type;
        int err = 0;

        for (type = 0; type <  ARRAY_SIZE(cpu_base) - 1; type++) {
                if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                        continue;
                cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
                per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;

                if (type < CPU_TSS_BIT)
                        err = cpu_init_msr(cpu, type, cpu_dentry);
                else
                        err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
                                              cpu_dentry);
                if (err)
                        return err;
        }

        return err;
}

static int cpu_init_cpu(void)
{
        struct dentry *cpu_dentry = NULL;
        struct cpuinfo_x86 *cpui;
        char cpu_dir[12];
        unsigned cpu;
        int err = 0;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                cpui = &cpu_data(cpu);
                if (!cpu_has(cpui, X86_FEATURE_MSR))
                        continue;
                per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
                                           (cpui->x86 << 8) |
                                           (cpui->x86_model));
                per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);

                sprintf(cpu_dir, "cpu%d", cpu);
                cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
                err = cpu_init_allreg(cpu, cpu_dentry);

                pr_info("cpu%d(%d) debug files %d\n",
                        cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
                if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
                        pr_err("Register files count %d exceeds limit %d\n",
                                per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
                        per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
                        err = -ENFILE;
                }
                if (err)
                        return err;
        }

        return err;
}

static int __init cpu_debug_init(void)
{
        cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

        return cpu_init_cpu();
}

static void __exit cpu_debug_exit(void)
{
        int i, cpu;

        if (cpu_debugfs_dir)
                debugfs_remove_recursive(cpu_debugfs_dir);

        for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
                for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
                        kfree(per_cpu(priv_arr[i], cpu));
}

module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");