/*
 * Routines to identify caches on Intel CPU.
 *
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
	unsigned char descriptor;

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	CACHE_TYPE_UNIFIED = 3

union _cpuid4_leaf_eax {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
		unsigned int		number_of_sets:32;
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long can_disable;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT, etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy"). */
	/* union l1_cache fields (CPUID 0x80000005) */
	unsigned line_size : 8;
	unsigned lines_per_tag : 8;
	unsigned size_in_kb : 8;

	/* union l2_cache fields (CPUID 0x80000006) */
	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_in_kb : 16;

	/* union l3_cache fields (CPUID 0x80000006) */
	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_encoded : 14;
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
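/*
 * Note added for clarity (not in the original source): assocs[] maps the
 * associativity codes reported by AMD's extended CPUID leaves to way counts
 * (e.g. a reported code of 6 is looked up as assocs[6] == 8 ways), while
 * levels[] and types[] translate the emulated leaf number (0 = L1D, 1 = L1I,
 * 2 = L2, 3 = L3) into the level and cache-type fields of the synthetic
 * CPUID(4) data built below.
 */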
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;

		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
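/*
 * Worked example (illustrative, not in the original source): a hypothetical
 * 512 KB, 16-way cache with 64-byte lines gives
 * number_of_sets = (512 * 1024) / 64 / 16 - 1 = 511, i.e. 512 sets in the
 * usual CPUID(4) "minus one" encoding.
 */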
static void __cpuinit amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
	this_leaf->can_disable = 1;

static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
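/*
 * Worked example (illustrative, not in the original source): a leaf reporting
 * 8192 sets, 64-byte lines, 1 physical line partition and 16 ways yields
 * size = 8192 * 64 * 1 * 16 = 8388608 bytes, i.e. an 8 MB cache.
 */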
static int __cpuinit find_num_cache_leaves(void)
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
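/*
 * Note added for clarity: CPUID leaf 4 describes one cache per sub-leaf and
 * returns CACHE_TYPE_NULL once the sub-leaf index runs past the last cache,
 * so a hypothetical CPU exposing L1D, L1I, L2 and L3 would make this loop
 * stop at sub-leaf 4 and report four cache leaves.
 */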
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
		/*
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			switch (this_leaf.eax.split.level) {
				if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
					new_l1i = this_leaf.size/1024;
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid >> index_msb;
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid >> index_msb;
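	/*
	 * Illustration (added, not in the original source): with two logical
	 * CPUs sharing this cache, num_threads_sharing is 2 and index_msb is 1,
	 * so APIC IDs 0 and 1 both shift down to the same l3_id; that value is
	 * later stored in cpu_llc_id so all sharers report one last-level-cache
	 * identifier.
	 */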
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++) {
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;

	per_cpu(cpu_llc_id, cpu) = l2_id;

	per_cpu(cpu_llc_id, cpu) = l3_id;

	printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);

	printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	printk(", L1 D cache: %dK\n", l1d);

	printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
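/*
 * Usage sketch (illustrative): CPUID4_INFO_IDX(2, 1) expands to
 * &((per_cpu(cpuid4_info, 2))[1]), i.e. a pointer to the second cache leaf
 * recorded for CPU 2.
 */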
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);

	index_msb = get_count_order(num_threads_sharing);

	for_each_online_cpu(i) {
		if (cpu_data(i).apicid >> index_msb ==
		    c->apicid >> index_msb) {
			cpu_set(i, this_leaf->shared_cpu_map);
			if (i != cpu && per_cpu(cpuid4_info, i)) {
				sibling_leaf = CPUID4_INFO_IDX(i, index);
				cpu_set(cpu, sibling_leaf->shared_cpu_map);

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void __cpuinit free_cache_attributes(unsigned int cpu)
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;

static int __cpuinit detect_cache_attributes(unsigned int cpu)
	struct _cpuid4_info *this_leaf;

	if (num_cache_leaves == 0)

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
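	/*
	 * Note added for clarity: the task is temporarily pinned to the target
	 * CPU so that the CPUID instructions issued by cpuid4_cache_lookup()
	 * below execute on that CPU; the saved oldmask is restored once all
	 * leaves have been read.
	 */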
	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
		cache_shared_cpu_map_setup(cpu, j);
	set_cpus_allowed_ptr(current, &oldmask);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
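/*
 * Note added for clarity: the raw CPUID(4) fields are stored in "minus one"
 * form, so these attributes add 1 back for line size, partitions, ways and
 * sets; the level field is already a plain number, hence the 0.
 */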
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;

		cpumask_t *mask = &this_leaf->shared_cpu_map;

			cpulist_scnprintf(buf, len-2, *mask) :
			cpumask_scnprintf(buf, len-2, *mask);
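/*
 * Illustration (added, not from the original source): for a leaf shared by
 * CPUs 0 and 1, the map form would print a hex bitmask along the lines of
 * "00000003", while the list form would print "0-1"; the type argument
 * chooses between cpumask_scnprintf() and cpulist_scnprintf().
 */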
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
	return show_shared_cpu_map_func(leaf, 0, buf);

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
	return show_shared_cpu_map_func(leaf, 1, buf);

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	return sprintf(buf, "Unknown\n");

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
	if (this_leaf->can_disable) {
		int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
		dev = k8_northbridges[node];

		for (i = 0; i < 2; i++) {
			pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
			ret += sprintf(buf, "%sEntry: %d\n", buf, i);
			ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
				reg & 0x80000000 ? "Disabled" : "Allowed",
				reg & 0x40000000 ? "Disabled" : "Allowed");
			ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n", buf,
				(reg & 0x30000) >> 16, reg & 0xfff);

	return sprintf(buf, "Feature not enabled\n");
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, size_t count)
	if (this_leaf->can_disable) {
		/* write the MSR value */
		unsigned int index, val;
		int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
		dev = k8_northbridges[node];

		if (strlen(buf) > 15)
		ret = sscanf(buf, "%x %x", &index, &val);

		pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
		pci_write_config_dword(dev, 0x1BC + index * 4, val);
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)
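/*
 * Expansion sketch (illustrative): define_one_ro(level) produces
 *
 *	static struct _cache_attr level = __ATTR(level, 0444, show_level, NULL);
 *
 * i.e. a read-only sysfs attribute named "level" whose show method is
 * show_level() and which has no store method.
 */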
define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);

static struct attribute *default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
	if (num_cache_leaves == 0)

	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))

	cpuid4_cache_sysfs_exit(cpu);
static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
	unsigned int cpu = sys_dev->id;
	struct _index_kobject *this_object;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &sys_dev->kobj, "%s", "cache");
		cpuid4_cache_sysfs_exit(cpu);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      per_cpu(cache_kobject, cpu),
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
	unsigned int cpu = sys_dev->id;

	if (per_cpu(cpuid4_info, cpu) == NULL)
	if (!cpu_isset(cpu, cache_dev_map))
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
	.notifier_call = cacheinfo_cpu_callback,
static int __cpuinit cache_sysfs_init(void)
	if (num_cache_leaves == 0)

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

device_initcall(cache_sysfs_init);