2 * Routines to identify caches on Intel CPUs.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
19 #include <linux/smp.h>
20 #include <asm/amd_nb.h>
30 unsigned char descriptor;
35 #define MB(x) ((x) * 1024)	/* cache_table sizes are stored in KB */
37 /* All the cache descriptor types we care about (no TLB entries;
38    trace cache descriptors are kept only so the P4 trace cache can be sized) */
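/*
 * Each entry maps one cpuid(2) descriptor byte to a cache level and a size
 * in KB (K-uops for the trace cache descriptors). For example, descriptor
 * 0x2c below denotes a 32 KB, 8-way L1 data cache with 64-byte lines, so
 * the cpuid(2) loop in init_intel_cacheinfo() adds 32 to its l1d total
 * when it encounters that byte.
 */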
40 static const struct _cache_table __cpuinitconst cache_table[] =
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
44 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
45 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
46 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
47 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
48 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
49 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
50 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
52 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
53 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
54 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
55 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
56 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
57 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
58 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
59 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
60 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
61 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
62 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
63 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
64 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
65 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
66 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
67 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
68 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
69 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
70 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
71 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
72 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
73 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
74 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
75 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
76 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
77 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
78 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
79 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
80 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
81 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
82 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
83 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
84 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
85 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
86 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
87 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
88 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
89 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
90 { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
91 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
92 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
93 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
94 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
95 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
96 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
97 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
98 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
99 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
100 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
101 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
102 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
103 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
104 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
105 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
106 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
107 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
108 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
109 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
110 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
111 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
112 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
113 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
122 CACHE_TYPE_UNIFIED = 3
125 union _cpuid4_leaf_eax {
127 enum _cache_type type:5;
128 unsigned int level:3;
129 unsigned int is_self_initializing:1;
130 unsigned int is_fully_associative:1;
131 unsigned int reserved:4;
132 unsigned int num_threads_sharing:12;
133 unsigned int num_cores_on_die:6;
138 union _cpuid4_leaf_ebx {
140 unsigned int coherency_line_size:12;
141 unsigned int physical_line_partition:10;
142 unsigned int ways_of_associativity:10;
147 union _cpuid4_leaf_ecx {
149 unsigned int number_of_sets:32;
154 struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
159 struct amd_northbridge *nb;
162 struct _cpuid4_info {
163 struct _cpuid4_info_regs base;
164 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
167 unsigned short num_cache_leaves;
169 /* AMD doesn't have CPUID4. Emulate it here to report the same
170 information to the user. This makes some assumptions about the machine:
171    L2 not shared, no SMT, etc.; this is currently true on AMD CPUs.
173 In theory the TLBs could be reported as fake type (they are in "dummy").
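/*
 * The emulated geometry is read from the AMD extended leaves below:
 * cpuid(0x80000005) returns the L1D and L1I descriptors in ECX/EDX, and
 * cpuid(0x80000006) returns the L2 and L3 descriptors in ECX/EDX.
 */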
177 unsigned line_size:8;
178 unsigned lines_per_tag:8;
180 unsigned size_in_kb:8;
187 unsigned line_size:8;
188 unsigned lines_per_tag:4;
190 unsigned size_in_kb:16;
197 unsigned line_size:8;
198 unsigned lines_per_tag:4;
201 unsigned size_encoded:14;
206 static const unsigned short __cpuinitconst assocs[] = {
217 [0xf] = 0xffff /* fully associative - no way to show this currently */
220 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
221 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
223 static void __cpuinit
224 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx)
229 unsigned line_size, lines_per_tag, assoc, size_in_kb;
230 union l1_cache l1i, l1d;
233 union l1_cache *l1 = &l1d;
239 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
240 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
248 assoc = assocs[l1->assoc];
249 line_size = l1->line_size;
250 lines_per_tag = l1->lines_per_tag;
251 size_in_kb = l1->size_in_kb;
256 assoc = assocs[l2.assoc];
257 line_size = l2.line_size;
258 lines_per_tag = l2.lines_per_tag;
259 /* cpu_data has errata corrections for K7 applied */
260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
265 assoc = assocs[l3.assoc];
266 line_size = l3.line_size;
267 lines_per_tag = l3.lines_per_tag;
268 size_in_kb = l3.size_encoded * 512;
269 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
270 size_in_kb = size_in_kb >> 1;
278 eax->split.is_self_initializing = 1;
279 eax->split.type = types[leaf];
280 eax->split.level = levels[leaf];
281 eax->split.num_threads_sharing = 0;
282 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
286 eax->split.is_fully_associative = 1;
287 ebx->split.coherency_line_size = line_size - 1;
288 ebx->split.ways_of_associativity = assoc - 1;
289 ebx->split.physical_line_partition = lines_per_tag - 1;
290 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
291 (ebx->split.ways_of_associativity + 1) - 1;
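/*
 * For example, a 512 KB, 16-way L2 with 64-byte lines yields
 * 512*1024 / 64 / 16 = 512 sets, stored as number_of_sets = 511 to
 * match the CPUID4 minus-one encoding used for the other fields above.
 */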
295 struct attribute attr;
296 ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
297 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
304 * L3 cache descriptors
306 static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
308 struct amd_l3_cache *l3 = &nb->l3_cache;
309 unsigned int sc0, sc1, sc2, sc3;
312 pci_read_config_dword(nb->misc, 0x1C4, &val);
314 /* calculate subcache sizes */
315 l3->subcaches[0] = sc0 = !(val & BIT(0));
316 l3->subcaches[1] = sc1 = !(val & BIT(4));
318 if (boot_cpu_data.x86 == 0x15) {
319 l3->subcaches[0] = sc0 += !(val & BIT(1));
320 l3->subcaches[1] = sc1 += !(val & BIT(5));
323 l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
324 l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
326 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
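/*
 * Example: if the largest per-subcache count computed above is 2, this
 * gives (2 << 10) - 1 = 2047, and amd_set_l3_disable_slot() below
 * rejects any index larger than that.
 */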
329 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
333 /* only for L3, and not in virtualized environments */
337 node = amd_get_nb_id(smp_processor_id());
338 this_leaf->nb = node_to_amd_nb(node);
339 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
340 amd_calc_l3_indices(this_leaf->nb);
344 * check whether a slot used for disabling an L3 index is occupied.
345 * @nb: northbridge descriptor for the node containing the L3 cache
346 * @slot: slot number (0..1)
348 * @returns: the disabled index if the slot is in use, or a negative value if the slot is free.
350 int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
352 unsigned int reg = 0;
354 pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
356 /* check whether this slot is activated already */
357 if (reg & (3UL << 30))
363 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
368 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
371 index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
373 return sprintf(buf, "%d\n", index);
375 return sprintf(buf, "FREE\n");
378 #define SHOW_CACHE_DISABLE(slot) \
380 show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
383 return show_cache_disable(this_leaf, buf, slot); \
385 SHOW_CACHE_DISABLE(0)
386 SHOW_CACHE_DISABLE(1)
388 static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
389 unsigned slot, unsigned long idx)
396 * disable index in all 4 subcaches
398 for (i = 0; i < 4; i++) {
399 u32 reg = idx | (i << 20);
401 if (!nb->l3_cache.subcaches[i])
404 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
407 * We need to WBINVD on a core on the node containing the L3
408 * cache whose index we are disabling; a plain local wbinvd() is not sufficient.
414 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
419 * disable an L3 cache index by using a disable-slot
421 * @nb: northbridge descriptor for the node containing the L3 cache
422 * @cpu: A CPU on the node containing the L3 cache
423 * @slot: slot number (0..1)
424 * @index: index to disable
426 * @return: 0 on success, error status on failure
428 int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
433 /* check if @slot is already used or the index is already disabled */
434 ret = amd_get_l3_disable_slot(nb, slot);
438 if (index > nb->l3_cache.indices)
441 /* check whether the other slot has disabled the same index already */
442 if (index == amd_get_l3_disable_slot(nb, !slot))
445 amd_l3_disable_index(nb, cpu, slot, index);
450 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
451 const char *buf, size_t count,
454 unsigned long val = 0;
457 if (!capable(CAP_SYS_ADMIN))
460 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
463 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
465 if (strict_strtoul(buf, 10, &val) < 0)
468 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
471 pr_warning("L3 slot %d in use/index already disabled!\n",
478 #define STORE_CACHE_DISABLE(slot) \
480 store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
481 const char *buf, size_t count, \
484 return store_cache_disable(this_leaf, buf, count, slot); \
486 STORE_CACHE_DISABLE(0)
487 STORE_CACHE_DISABLE(1)
489 static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
490 show_cache_disable_0, store_cache_disable_0);
491 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
492 show_cache_disable_1, store_cache_disable_1);
495 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
497 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
500 return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
504 store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
509 if (!capable(CAP_SYS_ADMIN))
512 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
515 if (strict_strtoul(buf, 16, &val) < 0)
518 if (amd_set_subcaches(cpu, val))
524 static struct _cache_attr subcaches =
525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
527 #else /* CONFIG_AMD_NB */
528 #define amd_init_l3_cache(x, y)
529 #endif /* CONFIG_AMD_NB */
532 __cpuinit cpuid4_cache_lookup_regs(int index,
533 struct _cpuid4_info_regs *this_leaf)
535 union _cpuid4_leaf_eax eax;
536 union _cpuid4_leaf_ebx ebx;
537 union _cpuid4_leaf_ecx ecx;
540 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
542 cpuid_count(0x8000001d, index, &eax.full,
543 &ebx.full, &ecx.full, &edx);
545 amd_cpuid4(index, &eax, &ebx, &ecx);
546 amd_init_l3_cache(this_leaf, index);
548 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
551 if (eax.split.type == CACHE_TYPE_NULL)
552 return -EIO; /* better error ? */
554 this_leaf->eax = eax;
555 this_leaf->ebx = ebx;
556 this_leaf->ecx = ecx;
557 this_leaf->size = (ecx.split.number_of_sets + 1) *
558 (ebx.split.coherency_line_size + 1) *
559 (ebx.split.physical_line_partition + 1) *
560 (ebx.split.ways_of_associativity + 1);
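/*
 * Example: number_of_sets = 1023, coherency_line_size = 63,
 * physical_line_partition = 0 and ways_of_associativity = 15 describe a
 * 1024 * 64 * 1 * 16 byte = 1 MB, 16-way cache.
 */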
564 static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
566 unsigned int eax, ebx, ecx, edx, op;
567 union _cpuid4_leaf_eax cache_eax;
570 if (c->x86_vendor == X86_VENDOR_AMD)
577 /* Do cpuid(op) loop to find out num_cache_leaves */
578 cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
579 cache_eax.full = eax;
580 } while (cache_eax.split.type != CACHE_TYPE_NULL);
584 void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
587 if (cpu_has_topoext) {
588 num_cache_leaves = find_num_cache_leaves(c);
589 } else if (c->extended_cpuid_level >= 0x80000006) {
590 if (cpuid_edx(0x80000006) & 0xf000)
591 num_cache_leaves = 4;
593 num_cache_leaves = 3;
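/*
 * The 0xf000 mask above tests the L3 associativity field of
 * cpuid(0x80000006) EDX (bits 15:12 per the AMD CPUID documentation);
 * a non-zero value is taken to mean an L3 exists, so four cache
 * leaves are reported instead of three.
 */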
597 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
600 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
601 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
602 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
603 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
605 unsigned int cpu = c->cpu_index;
608 if (c->cpuid_level > 3) {
609 static int is_initialized;
611 if (is_initialized == 0) {
612 /* Init num_cache_leaves from boot CPU */
613 num_cache_leaves = find_num_cache_leaves(c);
618 * Whenever possible use cpuid(4), the deterministic cache
619 * parameters leaf, to find the cache details
621 for (i = 0; i < num_cache_leaves; i++) {
622 struct _cpuid4_info_regs this_leaf;
625 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
627 switch (this_leaf.eax.split.level) {
629 if (this_leaf.eax.split.type ==
631 new_l1d = this_leaf.size/1024;
632 else if (this_leaf.eax.split.type ==
634 new_l1i = this_leaf.size/1024;
637 new_l2 = this_leaf.size/1024;
638 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
639 index_msb = get_count_order(num_threads_sharing);
640 l2_id = c->apicid & ~((1 << index_msb) - 1);
643 new_l3 = this_leaf.size/1024;
644 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
645 index_msb = get_count_order(
646 num_threads_sharing);
647 l3_id = c->apicid & ~((1 << index_msb) - 1);
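/*
 * Example: with 4 threads sharing the cache, index_msb =
 * get_count_order(4) = 2, so the low two APIC-ID bits are cleared
 * and all four sharers end up with the same l2_id/l3_id.
 */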
656 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 only for the trace cache.
659 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
660 /* supports eax=2 call */
662 unsigned int regs[4];
663 unsigned char *dp = (unsigned char *)regs;
666 if (num_cache_leaves != 0 && c->x86 == 15)
669 /* Number of times to iterate */
670 n = cpuid_eax(2) & 0xFF;
672 for (i = 0 ; i < n ; i++) {
673 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
675 /* If bit 31 is set, this is an unknown format */
676 for (j = 0 ; j < 3 ; j++)
677 if (regs[j] & (1 << 31))
680 /* Byte 0 is level count, not a descriptor */
681 for (j = 1 ; j < 16 ; j++) {
682 unsigned char des = dp[j];
685 /* look up this descriptor in the table */
686 while (cache_table[k].descriptor != 0) {
687 if (cache_table[k].descriptor == des) {
688 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
690 switch (cache_table[k].cache_type) {
692 l1i += cache_table[k].size;
695 l1d += cache_table[k].size;
698 l2 += cache_table[k].size;
701 l3 += cache_table[k].size;
704 trace += cache_table[k].size;
726 per_cpu(cpu_llc_id, cpu) = l2_id;
733 per_cpu(cpu_llc_id, cpu) = l3_id;
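/* Report L3 if present, otherwise L2, otherwise the combined L1 sizes. */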
737 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
744 /* pointer to _cpuid4_info array (for each cache leaf) */
745 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
746 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
750 static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
752 struct _cpuid4_info *this_leaf;
755 if (cpu_has_topoext) {
756 unsigned int apicid, nshared, first, last;
758 if (!per_cpu(ici_cpuid4_info, cpu))
761 this_leaf = CPUID4_INFO_IDX(cpu, index);
762 nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
763 apicid = cpu_data(cpu).apicid;
764 first = apicid - (apicid % nshared);
765 last = first + nshared - 1;
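/*
 * Example: nshared = 4 and apicid = 6 give first = 4 and last = 7, so
 * the loops below mark every online CPU whose APIC ID falls in 4..7 as
 * sharing this cache leaf.
 */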
767 for_each_online_cpu(i) {
768 apicid = cpu_data(i).apicid;
769 if ((apicid < first) || (apicid > last))
771 if (!per_cpu(ici_cpuid4_info, i))
773 this_leaf = CPUID4_INFO_IDX(i, index);
775 for_each_online_cpu(sibling) {
776 apicid = cpu_data(sibling).apicid;
777 if ((apicid < first) || (apicid > last))
779 set_bit(sibling, this_leaf->shared_cpu_map);
782 } else if (index == 3) {
783 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
784 if (!per_cpu(ici_cpuid4_info, i))
786 this_leaf = CPUID4_INFO_IDX(i, index);
787 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
788 if (!cpu_online(sibling))
790 set_bit(sibling, this_leaf->shared_cpu_map);
799 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
801 struct _cpuid4_info *this_leaf, *sibling_leaf;
802 unsigned long num_threads_sharing;
804 struct cpuinfo_x86 *c = &cpu_data(cpu);
806 if (c->x86_vendor == X86_VENDOR_AMD) {
807 if (cache_shared_amd_cpu_map_setup(cpu, index))
811 this_leaf = CPUID4_INFO_IDX(cpu, index);
812 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
814 if (num_threads_sharing == 1)
815 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
817 index_msb = get_count_order(num_threads_sharing);
819 for_each_online_cpu(i) {
820 if (cpu_data(i).apicid >> index_msb ==
821 c->apicid >> index_msb) {
823 to_cpumask(this_leaf->shared_cpu_map));
824 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
826 CPUID4_INFO_IDX(i, index);
827 cpumask_set_cpu(cpu, to_cpumask(
828 sibling_leaf->shared_cpu_map));
834 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
836 struct _cpuid4_info *this_leaf, *sibling_leaf;
839 this_leaf = CPUID4_INFO_IDX(cpu, index);
840 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
841 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
842 cpumask_clear_cpu(cpu,
843 to_cpumask(sibling_leaf->shared_cpu_map));
847 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
851 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
856 static void __cpuinit free_cache_attributes(unsigned int cpu)
860 for (i = 0; i < num_cache_leaves; i++)
861 cache_remove_shared_cpu_map(cpu, i);
863 kfree(per_cpu(ici_cpuid4_info, cpu));
864 per_cpu(ici_cpuid4_info, cpu) = NULL;
867 static void __cpuinit get_cpu_leaves(void *_retval)
869 int j, *retval = _retval, cpu = smp_processor_id();
871 /* Do cpuid and store the results */
872 for (j = 0; j < num_cache_leaves; j++) {
873 struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
875 *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
876 if (unlikely(*retval < 0)) {
879 for (i = 0; i < j; i++)
880 cache_remove_shared_cpu_map(cpu, i);
883 cache_shared_cpu_map_setup(cpu, j);
887 static int __cpuinit detect_cache_attributes(unsigned int cpu)
891 if (num_cache_leaves == 0)
894 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
895 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
896 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
899 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
901 kfree(per_cpu(ici_cpuid4_info, cpu));
902 per_cpu(ici_cpuid4_info, cpu) = NULL;
908 #include <linux/kobject.h>
909 #include <linux/sysfs.h>
910 #include <linux/cpu.h>
912 /* pointer to kobject for cpuX/cache */
913 static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
915 struct _index_kobject {
918 unsigned short index;
921 /* pointer to array of kobjects for cpuX/cache/indexY */
922 static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
923 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
925 #define show_one_plus(file_name, object, val) \
926 static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
929 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
932 show_one_plus(level, base.eax.split.level, 0);
933 show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
934 show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
935 show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
936 show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
938 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
941 return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
944 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
947 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
951 const struct cpumask *mask;
953 mask = to_cpumask(this_leaf->shared_cpu_map);
955 cpulist_scnprintf(buf, len-2, mask) :
956 cpumask_scnprintf(buf, len-2, mask);
963 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
966 return show_shared_cpu_map_func(leaf, 0, buf);
969 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
972 return show_shared_cpu_map_func(leaf, 1, buf);
975 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
978 switch (this_leaf->base.eax.split.type) {
979 case CACHE_TYPE_DATA:
980 return sprintf(buf, "Data\n");
981 case CACHE_TYPE_INST:
982 return sprintf(buf, "Instruction\n");
983 case CACHE_TYPE_UNIFIED:
984 return sprintf(buf, "Unified\n");
986 return sprintf(buf, "Unknown\n");
990 #define to_object(k) container_of(k, struct _index_kobject, kobj)
991 #define to_attr(a) container_of(a, struct _cache_attr, attr)
993 #define define_one_ro(_name) \
994 static struct _cache_attr _name = \
995 __ATTR(_name, 0444, show_##_name, NULL)
997 define_one_ro(level);
999 define_one_ro(coherency_line_size);
1000 define_one_ro(physical_line_partition);
1001 define_one_ro(ways_of_associativity);
1002 define_one_ro(number_of_sets);
1003 define_one_ro(size);
1004 define_one_ro(shared_cpu_map);
1005 define_one_ro(shared_cpu_list);
1007 static struct attribute *default_attrs[] = {
1010 &coherency_line_size.attr,
1011 &physical_line_partition.attr,
1012 &ways_of_associativity.attr,
1013 &number_of_sets.attr,
1015 &shared_cpu_map.attr,
1016 &shared_cpu_list.attr,
1020 #ifdef CONFIG_AMD_NB
1021 static struct attribute ** __cpuinit amd_l3_attrs(void)
1023 static struct attribute **attrs;
1029 n = ARRAY_SIZE(default_attrs);
1031 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
1034 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1037 attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
1039 return attrs = default_attrs;
1041 for (n = 0; default_attrs[n]; n++)
1042 attrs[n] = default_attrs[n];
1044 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
1045 attrs[n++] = &cache_disable_0.attr;
1046 attrs[n++] = &cache_disable_1.attr;
1049 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1050 attrs[n++] = &subcaches.attr;
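/*
 * kzalloc() above leaves the unused trailing slots zeroed, so the array
 * remains NULL-terminated; cache_add_dev() below installs it as
 * ktype_cache.default_attrs for leaves that have an attached northbridge.
 */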
1056 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1058 struct _cache_attr *fattr = to_attr(attr);
1059 struct _index_kobject *this_leaf = to_object(kobj);
1063 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1064 buf, this_leaf->cpu) :
1069 static ssize_t store(struct kobject *kobj, struct attribute *attr,
1070 const char *buf, size_t count)
1072 struct _cache_attr *fattr = to_attr(attr);
1073 struct _index_kobject *this_leaf = to_object(kobj);
1076 ret = fattr->store ?
1077 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1078 buf, count, this_leaf->cpu) :
1083 static const struct sysfs_ops sysfs_ops = {
1088 static struct kobj_type ktype_cache = {
1089 .sysfs_ops = &sysfs_ops,
1090 .default_attrs = default_attrs,
1093 static struct kobj_type ktype_percpu_entry = {
1094 .sysfs_ops = &sysfs_ops,
1097 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
1099 kfree(per_cpu(ici_cache_kobject, cpu));
1100 kfree(per_cpu(ici_index_kobject, cpu));
1101 per_cpu(ici_cache_kobject, cpu) = NULL;
1102 per_cpu(ici_index_kobject, cpu) = NULL;
1103 free_cache_attributes(cpu);
1106 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
1110 if (num_cache_leaves == 0)
1113 err = detect_cache_attributes(cpu);
1117 /* Allocate all required memory */
1118 per_cpu(ici_cache_kobject, cpu) =
1119 kzalloc(sizeof(struct kobject), GFP_KERNEL);
1120 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
1123 per_cpu(ici_index_kobject, cpu) = kzalloc(
1124 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
1125 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
1131 cpuid4_cache_sysfs_exit(cpu);
1135 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
1137 /* Add/Remove cache interface for CPU device */
1138 static int __cpuinit cache_add_dev(struct device *dev)
1140 unsigned int cpu = dev->id;
1142 struct _index_kobject *this_object;
1143 struct _cpuid4_info *this_leaf;
1146 retval = cpuid4_cache_sysfs_init(cpu);
1147 if (unlikely(retval < 0))
1150 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
1151 &ktype_percpu_entry,
1152 &dev->kobj, "%s", "cache");
1154 cpuid4_cache_sysfs_exit(cpu);
1158 for (i = 0; i < num_cache_leaves; i++) {
1159 this_object = INDEX_KOBJECT_PTR(cpu, i);
1160 this_object->cpu = cpu;
1161 this_object->index = i;
1163 this_leaf = CPUID4_INFO_IDX(cpu, i);
1165 ktype_cache.default_attrs = default_attrs;
1166 #ifdef CONFIG_AMD_NB
1167 if (this_leaf->base.nb)
1168 ktype_cache.default_attrs = amd_l3_attrs();
1170 retval = kobject_init_and_add(&(this_object->kobj),
1172 per_cpu(ici_cache_kobject, cpu),
1174 if (unlikely(retval)) {
1175 for (j = 0; j < i; j++)
1176 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
1177 kobject_put(per_cpu(ici_cache_kobject, cpu));
1178 cpuid4_cache_sysfs_exit(cpu);
1181 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
1183 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
1185 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
1189 static void __cpuinit cache_remove_dev(struct device *dev)
1191 unsigned int cpu = dev->id;
1194 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1196 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1198 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1200 for (i = 0; i < num_cache_leaves; i++)
1201 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1202 kobject_put(per_cpu(ici_cache_kobject, cpu));
1203 cpuid4_cache_sysfs_exit(cpu);
1206 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1207 unsigned long action, void *hcpu)
1209 unsigned int cpu = (unsigned long)hcpu;
1212 dev = get_cpu_device(cpu);
1215 case CPU_ONLINE_FROZEN:
1219 case CPU_DEAD_FROZEN:
1220 cache_remove_dev(dev);
1226 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1227 .notifier_call = cacheinfo_cpu_callback,
1230 static int __cpuinit cache_sysfs_init(void)
1234 if (num_cache_leaves == 0)
1237 for_each_online_cpu(i) {
1239 struct device *dev = get_cpu_device(i);
1241 err = cache_add_dev(dev);
1245 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1249 device_initcall(cache_sysfs_init);