/*
 *  arch/s390/kernel/topology.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

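/*
 * Layout of the topology-list entries returned by stsi 15.1.2: a CPU
 * entry carries an origin and a 64-bit mask of CPU addresses, while a
 * container entry only describes the nesting structure.
 */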
#define CPU_BITS 64

struct tl_cpu {
        unsigned char reserved[6];
        unsigned short origin;
        unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

struct tl_container {
        unsigned char reserved[8];
};

union tl_entry {
        unsigned char nl;
        struct tl_cpu cpu;
        struct tl_container container;
};

#define NR_MAG 6

struct tl_info {
        unsigned char reserved0[2];
        unsigned short length;
        unsigned char mag[NR_MAG];
        unsigned char reserved1;
        unsigned char mnest;
        unsigned char reserved2[4];
        union tl_entry tle[0];
};

struct core_info {
        struct core_info *next;
        cpumask_t mask;
};

static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);

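/*
 * Return the set of cpus that share a core with @cpu. Without
 * topology support every present cpu is reported as a sibling.
 */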
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
        struct core_info *core = &core_info;
        cpumask_t mask;

        cpus_clear(mask);
        if (!machine_has_topology)
                return cpu_present_map;
        mutex_lock(&smp_cpu_state_mutex);
        while (core) {
                if (cpu_isset(cpu, core->mask)) {
                        mask = core->mask;
                        break;
                }
                core = core->next;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        if (cpus_empty(mask))
                mask = cpumask_of_cpu(cpu);
        return mask;
}

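/*
 * Translate the cpu mask of a topology-list CPU entry into logical
 * cpu numbers and add them to @core.
 */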
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
        unsigned int cpu;

        for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
             cpu < CPU_BITS;
             cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
        {
                unsigned int rcpu, lcpu;

                /*
                 * The hardware numbers the mask bits from the left
                 * (leftmost bit = cpu address tl_cpu->origin), while
                 * find_*_bit counts from the right, so mirror the bit
                 * position to get the real cpu address.
                 */
                rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
                for_each_present_cpu(lcpu) {
                        if (__cpu_logical_map[lcpu] == rcpu)
                                cpu_set(lcpu, core->mask);
                }
        }
}

static void clear_cores(void)
{
        struct core_info *core = &core_info;

        while (core) {
                cpus_clear(core->mask);
                core = core->next;
        }
}

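/*
 * A topology-list entry with nesting level zero is a 16 byte CPU
 * entry; all other nesting levels are 8 byte container entries.
 */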
static union tl_entry *next_tle(union tl_entry *tle)
{
        if (tle->nl)
                return (union tl_entry *)((struct tl_container *)tle + 1);
        else
                return (union tl_entry *)((struct tl_cpu *)tle + 1);
}

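/*
 * Walk the topology list and rebuild the core_info chain: each
 * nesting level 1 container starts a new core, each level 0 entry
 * adds its cpus to the current core.
 */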
static void tl_to_cores(struct tl_info *info)
{
        union tl_entry *tle, *end;
        struct core_info *core = &core_info;

        mutex_lock(&smp_cpu_state_mutex);
        clear_cores();
        tle = (union tl_entry *)&info->tle;
        end = (union tl_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 5:
                case 4:
                case 3:
                case 2:
                        break;
                case 1:
                        core = core->next;
                        break;
                case 0:
                        add_cpus_to_core(&tle->cpu, core);
                        break;
                default:
                        /* Unknown nesting level: drop topology info. */
                        clear_cores();
                        machine_has_topology = 0;
                        goto out;
                }
                tle = next_tle(tle);
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
}

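/*
 * Perform Topology Function (opcode 0xb9a2) with function code 2:
 * the returned condition code is nonzero if a topology change
 * report is pending.
 */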
static int ptf(void)
{
        int rc;

        asm volatile(
                "       .insn   rre,0xb9a20000,%1,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (rc)
                : "d" (2UL) : "cc");
        return rc;
}

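/*
 * Fetch the current topology information, rebuild the core masks and
 * notify user space of the change.
 */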
void arch_update_cpu_topology(void)
{
        struct tl_info *info = tl_info;
        struct sys_device *sysdev;
        int cpu;

        if (!machine_has_topology)
                return;
        ptf();
        stsi(info, 15, 1, 2);
        tl_to_cores(info);
        for_each_online_cpu(cpu) {
                sysdev = get_cpu_sysdev(cpu);
                kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
        }
}

static void topology_work_fn(struct work_struct *work)
{
        arch_reinit_sched_domains();
}

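/*
 * Fallback for machines without the topology change interrupt: poll
 * for pending topology changes once a minute.
 */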
static void topology_timer_fn(unsigned long ignored)
{
        if (ptf())
                schedule_work(&topology_work);
        set_topology_timer();
}

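/* (Re-)arm the polling timer to fire in 60 seconds. */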
static void set_topology_timer(void)
{
        topology_timer.function = topology_timer_fn;
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        add_timer(&topology_timer);
}

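/* Handler for the topology change external interrupt (code 0x2005). */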
static void topology_interrupt(__u16 code)
{
        schedule_work(&topology_work);
}

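/*
 * Select the notification mechanism: prefer the topology change
 * external interrupt and fall back to the polling timer.
 */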
static int __init init_topology_update(void)
{
        int rc;

        if (!machine_has_topology)
                return 0;
        init_timer(&topology_timer);
        if (machine_has_topology_irq) {
                rc = register_external_interrupt(0x2005, topology_interrupt);
                if (rc)
                        return rc;
                /* Enable the topology change external interrupt. */
                ctl_set_bit(0, 8);
        } else
                set_topology_timer();
        return 0;
}
__initcall(init_topology_update);

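/*
 * Boot-time detection of topology support: consult the stfle
 * facility list and, if the machine provides topology information,
 * allocate the stsi buffer and the core_info list.
 */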
void __init s390_init_cpu_topology(void)
{
        unsigned long long facility_bits;
        struct tl_info *info;
        struct core_info *core;
        int nr_cores;
        int i;

        if (stfle(&facility_bits, 1) <= 0)
                return;
        /* Both facility bits required for topology must be set. */
        if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
                return;
        machine_has_topology = 1;

        /* Topology changes may be signalled via external interrupt. */
        if (facility_bits & (1ULL << 51))
                machine_has_topology_irq = 1;

        tl_info = alloc_bootmem_pages(PAGE_SIZE);
        if (!tl_info)
                goto error;
        info = tl_info;
        stsi(info, 15, 1, 2);

        /*
         * The maximum number of cores is the product of the entry
         * counts of all nesting levels above the CPU level.
         */
        nr_cores = info->mag[NR_MAG - 2];
        for (i = 0; i < info->mnest - 2; i++)
                nr_cores *= info->mag[NR_MAG - 3 - i];

        printk(KERN_INFO "CPU topology:");
        for (i = 0; i < NR_MAG; i++)
                printk(" %d", info->mag[i]);
        printk(" / %d\n", info->mnest);

        /* Preallocate one core_info per possible core. */
        core = &core_info;
        for (i = 0; i < nr_cores; i++) {
                core->next = alloc_bootmem(sizeof(struct core_info));
                core = core->next;
                if (!core)
                        goto error;
        }
        return;
error:
        machine_has_topology = 0;
        machine_has_topology_irq = 0;
}