 * Hypervisor-supplied "24x7" performance counter support
4 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
5 * Copyright 2014 IBM Corporation.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
13 #define pr_fmt(fmt) "hv-24x7: " fmt
15 #include <linux/perf_event.h>
16 #include <linux/rbtree.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
21 #include <asm/firmware.h>
22 #include <asm/hvcall.h>
24 #include <linux/byteorder/generic.h>
27 #include "hv-24x7-catalog.h"
28 #include "hv-common.h"
30 static bool domain_is_valid(unsigned domain)
33 #define DOMAIN(n, v, x, c) \
34 case HV_PERF_DOMAIN_##n: \
36 #include "hv-24x7-domains.h"
44 static bool is_physical_domain(unsigned domain)
47 #define DOMAIN(n, v, x, c) \
48 case HV_PERF_DOMAIN_##n: \
50 #include "hv-24x7-domains.h"
57 static const char *domain_name(unsigned domain)
59 if (!domain_is_valid(domain))
63 case HV_PERF_DOMAIN_PHYS_CHIP: return "Physical Chip";
64 case HV_PERF_DOMAIN_PHYS_CORE: return "Physical Core";
65 case HV_PERF_DOMAIN_VCPU_HOME_CORE: return "VCPU Home Core";
66 case HV_PERF_DOMAIN_VCPU_HOME_CHIP: return "VCPU Home Chip";
67 case HV_PERF_DOMAIN_VCPU_HOME_NODE: return "VCPU Home Node";
68 case HV_PERF_DOMAIN_VCPU_REMOTE_NODE: return "VCPU Remote Node";
75 static bool catalog_entry_domain_is_valid(unsigned domain)
77 return is_physical_domain(domain);
81 * TODO: Merging events:
82 * - Think of the hcall as an interface to a 4d array of counters:
84 * - y = indexes in the domain (core, chip, vcpu, node, etc)
85 * - z = offset into the counter space
86 * - w = lpars (guest vms, "logical partitions")
87 * - A single request is: x,y,y_last,z,z_last,w,w_last
88 * - this means we can retrieve a rectangle of counters in y,z for a single x.
90 * - Things to consider (ignoring w):
91 * - input cost_per_request = 16
92 * - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
93 * - limited number of requests per hcall (must fit into 4K bytes)
 *      - 4k bytes = 16 [buffer header] + 16 [request size] * request_count,
 *        so (4096 - 16) / 16 = 255 requests per hcall
96 * - sometimes it will be more efficient to read extra data and discard
101 * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
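 *
 * - Example of the cost trade-off above (illustrative arithmetic, not from
 *   the spec): reading two adjacent 8-byte counters for 4 cores as one
 *   merged request costs 16 + (8 + 8*4 + 4*16) = 120 bytes, while two
 *   separate requests cost 2*16 + 2*(8 + 8*4 + 4*8) = 176 bytes, so
 *   merging wins even before any data is discarded.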
/* u4 (config bits 0-3), one of HV_PERF_DOMAIN_* (valid values are 1-6) */
105 EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
107 EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
108 EVENT_DEFINE_RANGE_FORMAT(chip, config, 16, 31);
109 EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
110 /* u32, see "data_offset" */
111 EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
113 EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);
115 EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
116 EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
117 EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
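/*
 * Putting the definitions above together, the event encoding looks like
 * this (bit ranges as passed to the EVENT_DEFINE_RANGE* macros):
 *
 *	config:  domain [0-3], reserved1 [4-15], core/chip/vcpu [16-31],
 *		 offset [32-63]
 *	config1: lpar [0-15], reserved2 [16-63]
 *	config2: reserved3 [0-63]
 */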
119 static struct attribute *format_attrs[] = {
120 &format_attr_domain.attr,
121 &format_attr_offset.attr,
122 &format_attr_core.attr,
123 &format_attr_chip.attr,
124 &format_attr_vcpu.attr,
125 &format_attr_lpar.attr,
129 static struct attribute_group format_group = {
131 .attrs = format_attrs,
134 static struct attribute_group event_group = {
136 /* .attrs is set in init */
139 static struct attribute_group event_desc_group = {
140 .name = "event_descs",
141 /* .attrs is set in init */
144 static struct attribute_group event_long_desc_group = {
145 .name = "event_long_descs",
146 /* .attrs is set in init */
149 static struct kmem_cache *hv_page_cache;
151 DEFINE_PER_CPU(int, hv_24x7_txn_flags);
152 DEFINE_PER_CPU(int, hv_24x7_txn_err);
155 struct perf_event *events[255];
158 DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
161 * request_buffer and result_buffer are not required to be 4k aligned,
162 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
163 * the simplest way to ensure that.
165 #define H24x7_DATA_BUFFER_SIZE 4096
166 DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
167 DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
169 #define MAX_NUM_REQUESTS ((H24x7_DATA_BUFFER_SIZE - \
170 sizeof(struct hv_24x7_request_buffer)) \
171 / sizeof(struct hv_24x7_request))
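/*
 * Layout of the variable-length tail of struct hv_24x7_event_data, as
 * implied by the accessors below (a sketch derived from the code, not
 * from the catalog spec):
 *
 *	remainder + 0           : name (nl - 2 bytes)
 *	remainder + nl - 2      : __be16 dl (desc length)
 *	remainder + nl          : description (dl - 2 bytes)
 *	remainder + nl + dl - 2 : __be16 ldl (long desc length)
 *	remainder + nl + dl     : long description (ldl - 2 bytes)
 *
 * where nl = be16_to_cpu(ev->event_name_len); each 16-bit length field
 * counts its own 2 bytes plus its payload.
 */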
173 static char *event_name(struct hv_24x7_event_data *ev, int *len)
175 *len = be16_to_cpu(ev->event_name_len) - 2;
176 return (char *)ev->remainder;
179 static char *event_desc(struct hv_24x7_event_data *ev, int *len)
181 unsigned nl = be16_to_cpu(ev->event_name_len);
182 __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
184 *len = be16_to_cpu(*desc_len) - 2;
185 return (char *)ev->remainder + nl;
188 static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
190 unsigned nl = be16_to_cpu(ev->event_name_len);
191 __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
192 unsigned desc_len = be16_to_cpu(*desc_len_);
193 __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
195 *len = be16_to_cpu(*long_desc_len) - 2;
196 return (char *)ev->remainder + nl + desc_len;
199 static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
204 return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
208 * Things we don't check:
209 * - padding for desc, name, and long/detailed desc is required to be '\0'
 * Return NULL if we pass end;
 * otherwise return the address of the byte just following the event.
215 static void *event_end(struct hv_24x7_event_data *ev, void *end)
220 unsigned nl = be16_to_cpu(ev->event_name_len);
223 pr_debug("%s: name length too short: %d", __func__, nl);
227 if (start + nl > end) {
228 pr_debug("%s: start=%p + nl=%u > end=%p",
229 __func__, start, nl, end);
233 dl_ = (__be16 *)(ev->remainder + nl - 2);
234 if (!IS_ALIGNED((uintptr_t)dl_, 2))
235 pr_warn("desc len not aligned %p", dl_);
236 dl = be16_to_cpu(*dl_);
238 pr_debug("%s: desc len too short: %d", __func__, dl);
242 if (start + nl + dl > end) {
243 pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
244 __func__, start, nl, dl, start + nl + dl, end);
248 ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
249 if (!IS_ALIGNED((uintptr_t)ldl_, 2))
250 pr_warn("long desc len not aligned %p", ldl_);
251 ldl = be16_to_cpu(*ldl_);
253 pr_debug("%s: long desc len too short (ldl=%u)",
258 if (start + nl + dl + ldl > end) {
259 pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
260 __func__, start, nl, dl, ldl, end);
264 return start + nl + dl + ldl;
267 static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
268 unsigned long version,
271 pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
272 phys_4096, version, index);
274 WARN_ON(!IS_ALIGNED(phys_4096, 4096));
276 return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
277 phys_4096, version, index);
280 static unsigned long h_get_24x7_catalog_page(char page[],
281 u64 version, u32 index)
283 return h_get_24x7_catalog_page_(virt_to_phys(page),
 * Each event we find in the catalog will have a sysfs entry. Format the
289 * data for this sysfs entry based on the event's domain.
291 * Events belonging to the Chip domain can only be monitored in that domain.
 * i.e. the domain for these events is a fixed/known value.
294 * Events belonging to the Core domain can be monitored either in the physical
295 * core or in one of the virtual CPU domains. So the domain value for these
 * events must be specified by the user (i.e. it is a required parameter).
 * Format the Core events with 'domain=?' so the perf tool can error-check
 * required parameters.
300 * NOTE: For the Core domain events, rather than making domain a required
 * parameter we could default it to PHYS_CORE and allow users to
302 * override the domain to one of the VCPU domains.
304 * However, this can make the interface a little inconsistent.
 * If we set domain=2 (PHYS_CHIP) and allow the user to override this
 * field, the user may be tempted to also modify the "offset=x" field,
 * which can lead to confusing usage. Consider the HPM_PCYC (offset=0x18) and
309 * HPM_INST (offset=0x20) events. With:
311 * perf stat -e hv_24x7/HPM_PCYC,offset=0x20/
313 * we end up monitoring HPM_INST, while the command line has HPM_PCYC.
315 * By not assigning a default value to the domain for the Core events,
316 * we can have simple guidelines:
318 * - Specifying values for parameters with "=?" is required.
 * - Specifying (i.e. overriding) values for other parameters is undefined.
323 static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
327 const char *domain_str;
331 case HV_PERF_DOMAIN_PHYS_CHIP:
332 snprintf(buf, sizeof(buf), "%d", domain);
337 case HV_PERF_DOMAIN_PHYS_CORE:
348 return kasprintf(GFP_KERNEL,
349 "domain=%s,offset=0x%x,%s=?,lpar=%s",
351 be16_to_cpu(event->event_counter_offs) +
352 be16_to_cpu(event->event_group_record_offs),
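/*
 * For example, given the format string above, a PHYS_CHIP event whose
 * counter offset works out to 0x18 would be published roughly as
 * "domain=2,offset=0x18,chip=?,lpar=0x0" (the exact index and lpar
 * strings come from the switch above); every "=?" field must be
 * supplied by the user, per the comment block preceding event_fmt().
 */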
357 /* Avoid trusting fw to NUL terminate strings */
358 static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
360 return kasprintf(gfp, "%.*s", max_len, maybe_str);
363 static ssize_t device_show_string(struct device *dev,
364 struct device_attribute *attr, char *buf)
366 struct dev_ext_attribute *d;
368 d = container_of(attr, struct dev_ext_attribute, attr);
370 return sprintf(buf, "%s\n", (char *)d->var);
373 static struct attribute *device_str_attr_create_(char *name, char *str)
375 struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
380 sysfs_attr_init(&attr->attr.attr);
383 attr->attr.attr.name = name;
384 attr->attr.attr.mode = 0444;
385 attr->attr.show = device_show_string;
387 return &attr->attr.attr;
391 * Allocate and initialize strings representing event attributes.
 * NOTE: The strings allocated here are never destroyed and continue to
 * exist until shutdown. This is to allow us to create as many events
 * from the catalog as possible, even if we encounter errors with some.
 * If the error paths change in the future, these may need to be
 * freed by the caller.
399 static struct attribute *device_str_attr_create(char *name, int name_max,
401 char *str, size_t str_max)
404 char *s = memdup_to_str(str, str_max, GFP_KERNEL);
411 n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
413 n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
418 a = device_str_attr_create_(n, s);
430 static struct attribute *event_to_attr(unsigned ix,
431 struct hv_24x7_event_data *event,
436 char *ev_name, *a_ev_name, *val;
437 struct attribute *attr;
439 if (!domain_is_valid(domain)) {
440 pr_warn("catalog event %u has invalid domain %u\n",
445 val = event_fmt(event, domain);
449 ev_name = event_name(event, &event_name_len);
451 a_ev_name = kasprintf(GFP_KERNEL, "%.*s",
452 (int)event_name_len, ev_name);
454 a_ev_name = kasprintf(GFP_KERNEL, "%.*s__%d",
455 (int)event_name_len, ev_name, nonce);
460 attr = device_str_attr_create_(a_ev_name, val);
472 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
476 char *name = event_name(event, &nl);
477 char *desc = event_desc(event, &dl);
479 /* If there isn't a description, don't create the sysfs file */
483 return device_str_attr_create(name, nl, nonce, desc, dl);
486 static struct attribute *
487 event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
490 char *name = event_name(event, &nl);
491 char *desc = event_long_desc(event, &dl);
493 /* If there isn't a description, don't create the sysfs file */
497 return device_str_attr_create(name, nl, nonce, desc, dl);
500 static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
501 struct hv_24x7_event_data *event, int nonce)
503 *attrs = event_to_attr(ix, event, event->domain, nonce);
519 static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
526 return memcmp(d1, d2, s1);
529 static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
530 size_t s2, unsigned d2)
532 int r = memord(v1, s1, v2, s2);
543 static int event_uniq_add(struct rb_root *root, const char *name, int nl,
546 struct rb_node **new = &(root->rb_node), *parent = NULL;
547 struct event_uniq *data;
549 /* Figure out where to put new node */
551 struct event_uniq *it;
554 it = container_of(*new, struct event_uniq, node);
555 result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
560 new = &((*new)->rb_left);
562 new = &((*new)->rb_right);
565 pr_info("found a duplicate event %.*s, ct=%u\n", nl,
571 data = kmalloc(sizeof(*data), GFP_KERNEL);
575 *data = (struct event_uniq) {
582 /* Add new node and rebalance tree. */
583 rb_link_node(&data->node, parent, new);
584 rb_insert_color(&data->node, root);
590 static void event_uniq_destroy(struct rb_root *root)
593 * the strings we point to are in the giant block of memory filled by
594 * the catalog, and are freed separately.
596 struct event_uniq *pos, *n;
598 rbtree_postorder_for_each_entry_safe(pos, n, root, node)
 * Ensure the event structure's sizes are self-consistent and don't cause
 * us to read outside of the event.
607 * On success, return the event length in bytes.
608 * Otherwise, return -1 (and print as appropriate).
610 static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
612 size_t event_data_bytes,
613 size_t event_entry_count,
614 size_t offset, void *end)
617 void *ev_end, *calc_ev_end;
619 if (offset >= event_data_bytes)
622 if (event_idx >= event_entry_count) {
623 pr_devel("catalog event data has %zu bytes of padding after last event\n",
624 event_data_bytes - offset);
628 if (!event_fixed_portion_is_within(event, end)) {
629 pr_warn("event %zu fixed portion is not within range\n",
634 ev_len = be16_to_cpu(event->length);
637 pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
638 event_idx, ev_len, event);
640 ev_end = (__u8 *)event + ev_len;
642 pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
643 event_idx, ev_len, ev_end, end,
648 calc_ev_end = event_end(event, end);
650 pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
651 event_idx, event_data_bytes, event, end,
656 if (calc_ev_end > ev_end) {
657 pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
658 event_idx, event, ev_end, offset, calc_ev_end);
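/* the largest number of 4K pages that a size_t byte count can describe */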
665 #define MAX_4K (SIZE_MAX / 4096)
667 static int create_events_from_catalog(struct attribute ***events_,
668 struct attribute ***event_descs_,
669 struct attribute ***event_long_descs_)
672 size_t catalog_len, catalog_page_len, event_entry_count,
673 event_data_len, event_data_offs,
674 event_data_bytes, junk_events, event_idx, event_attr_ct, i,
675 attr_max, event_idx_last, desc_ct, long_desc_ct;
677 uint64_t catalog_version_num;
678 struct attribute **events, **event_descs, **event_long_descs;
679 struct hv_24x7_catalog_page_0 *page_0 =
680 kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
682 void *event_data, *end;
683 struct hv_24x7_event_data *event;
684 struct rb_root ev_uniq = RB_ROOT;
692 hret = h_get_24x7_catalog_page(page, 0, 0);
698 catalog_version_num = be64_to_cpu(page_0->version);
699 catalog_page_len = be32_to_cpu(page_0->length);
701 if (MAX_4K < catalog_page_len) {
702 pr_err("invalid page count: %zu\n", catalog_page_len);
707 catalog_len = catalog_page_len * 4096;
709 event_entry_count = be16_to_cpu(page_0->event_entry_count);
710 event_data_offs = be16_to_cpu(page_0->event_data_offs);
711 event_data_len = be16_to_cpu(page_0->event_data_len);
713 pr_devel("cv %llu cl %zu eec %zu edo %zu edl %zu\n",
714 catalog_version_num, catalog_len,
715 event_entry_count, event_data_offs, event_data_len);
717 if ((MAX_4K < event_data_len)
718 || (MAX_4K < event_data_offs)
719 || (MAX_4K - event_data_offs < event_data_len)) {
720 pr_err("invalid event data offs %zu and/or len %zu\n",
721 event_data_offs, event_data_len);
726 if ((event_data_offs + event_data_len) > catalog_page_len) {
727 pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
729 event_data_offs + event_data_len,
735 if (SIZE_MAX - 1 < event_entry_count) {
736 pr_err("event_entry_count %zu is invalid\n", event_entry_count);
741 event_data_bytes = event_data_len * 4096;
 * Event data can span several pages, and events can cross page
 * boundaries. Use vmalloc to make this easier.
747 event_data = vmalloc(event_data_bytes);
749 pr_err("could not allocate event data\n");
754 end = event_data + event_data_bytes;
 * using vmalloc_to_phys() like this only works if PAGE_SIZE is a
 * multiple of 4096
760 BUILD_BUG_ON(PAGE_SIZE % 4096);
762 for (i = 0; i < event_data_len; i++) {
763 hret = h_get_24x7_catalog_page_(
764 vmalloc_to_phys(event_data + i * 4096),
766 i + event_data_offs);
768 pr_err("Failed to get event data in page %zu: rc=%ld\n",
769 i + event_data_offs, hret);
 * Scan the catalog to determine the number of attributes we need, and
 * verify it at the same time.
779 for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
781 event_idx++, event = (void *)event + ev_len) {
782 size_t offset = (void *)event - (void *)event_data;
786 ev_len = catalog_event_len_validate(event, event_idx,
793 name = event_name(event, &nl);
795 if (event->event_group_record_len == 0) {
796 pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
797 event_idx, nl, name);
802 if (!catalog_entry_domain_is_valid(event->domain)) {
803 pr_info("event %zu (%.*s) has invalid domain %d\n",
804 event_idx, nl, name, event->domain);
812 event_idx_last = event_idx;
813 if (event_idx_last != event_entry_count)
814 pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
815 event_idx_last, event_entry_count, junk_events);
817 events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
823 event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
830 event_long_descs = kmalloc_array(event_idx + 1,
831 sizeof(*event_long_descs), GFP_KERNEL);
832 if (!event_long_descs) {
837 /* Iterate over the catalog filling in the attribute vector */
838 for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
839 event = event_data, event_idx = 0;
840 event_idx < event_idx_last;
841 event_idx++, ev_len = be16_to_cpu(event->length),
842 event = (void *)event + ev_len) {
 * These are the only "bad" events that are intermixed and that
 * we can ignore without issue. Make sure to skip them here.
850 if (event->event_group_record_len == 0)
852 if (!catalog_entry_domain_is_valid(event->domain))
855 name = event_name(event, &nl);
856 nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
857 ct = event_data_to_attrs(event_idx, events + event_attr_ct,
860 pr_warn("event %zu (%.*s) creation failure, skipping\n",
861 event_idx, nl, name);
865 event_descs[desc_ct] = event_to_desc_attr(event, nonce);
866 if (event_descs[desc_ct])
868 event_long_descs[long_desc_ct] =
869 event_to_long_desc_attr(event, nonce);
870 if (event_long_descs[long_desc_ct])
875 pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
876 event_idx, event_attr_ct, junk_events, desc_ct);
878 events[event_attr_ct] = NULL;
879 event_descs[desc_ct] = NULL;
880 event_long_descs[long_desc_ct] = NULL;
882 event_uniq_destroy(&ev_uniq);
884 kmem_cache_free(hv_page_cache, page);
887 *event_descs_ = event_descs;
888 *event_long_descs_ = event_long_descs;
898 kmem_cache_free(hv_page_cache, page);
901 *event_descs_ = NULL;
902 *event_long_descs_ = NULL;
906 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
907 struct bin_attribute *bin_attr, char *buf,
908 loff_t offset, size_t count)
912 size_t catalog_len = 0, catalog_page_len = 0;
913 loff_t page_offset = 0;
914 loff_t offset_in_page;
916 uint64_t catalog_version_num = 0;
917 void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
918 struct hv_24x7_catalog_page_0 *page_0 = page;
923 hret = h_get_24x7_catalog_page(page, 0, 0);
929 catalog_version_num = be64_to_cpu(page_0->version);
930 catalog_page_len = be32_to_cpu(page_0->length);
931 catalog_len = catalog_page_len * 4096;
933 page_offset = offset / 4096;
934 offset_in_page = offset % 4096;
936 if (page_offset >= catalog_page_len)
939 if (page_offset != 0) {
940 hret = h_get_24x7_catalog_page(page, catalog_version_num,
948 copy_len = 4096 - offset_in_page;
949 if (copy_len > count)
952 memcpy(buf, page+offset_in_page, copy_len);
957 pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
959 catalog_version_num, page_offset, hret);
960 kmem_cache_free(hv_page_cache, page);
962 pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
963 "catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
964 count, catalog_len, catalog_page_len, ret);
969 static ssize_t domains_show(struct device *dev, struct device_attribute *attr,
975 for (d = 0; d < HV_PERF_DOMAIN_MAX; d++) {
976 str = domain_name(d);
980 n = sprintf(page, "%d: %s\n", d, str);
990 #define PAGE_0_ATTR(_name, _fmt, _expr) \
991 static ssize_t _name##_show(struct device *dev, \
992 struct device_attribute *dev_attr, \
995 unsigned long hret; \
997 void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); \
998 struct hv_24x7_catalog_page_0 *page_0 = page; \
1001 hret = h_get_24x7_catalog_page(page, 0, 0); \
1006 ret = sprintf(buf, _fmt, _expr); \
1008 kmem_cache_free(hv_page_cache, page); \
1011 static DEVICE_ATTR_RO(_name)
1013 PAGE_0_ATTR(catalog_version, "%lld\n",
1014 (unsigned long long)be64_to_cpu(page_0->version));
1015 PAGE_0_ATTR(catalog_len, "%lld\n",
1016 (unsigned long long)be32_to_cpu(page_0->length) * 4096);
1017 static BIN_ATTR_RO(catalog, 0/* real length varies */);
1018 static DEVICE_ATTR_RO(domains);
1020 static struct bin_attribute *if_bin_attrs[] = {
1025 static struct attribute *if_attrs[] = {
1026 &dev_attr_catalog_len.attr,
1027 &dev_attr_catalog_version.attr,
1028 &dev_attr_domains.attr,
1032 static struct attribute_group if_group = {
1033 .name = "interface",
1034 .bin_attrs = if_bin_attrs,
1038 static const struct attribute_group *attr_groups[] = {
1042 &event_long_desc_group,
1048 * Start the process for a new H_GET_24x7_DATA hcall.
1050 static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
1051 struct hv_24x7_data_result_buffer *result_buffer)
1054 memset(request_buffer, 0, 4096);
1055 memset(result_buffer, 0, 4096);
1057 request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
1058 /* memset above set request_buffer->num_requests to 0 */
 * Commit (i.e. perform) the H_GET_24x7_DATA hcall using the data collected
1063 * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
1065 static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
1066 struct hv_24x7_data_result_buffer *result_buffer)
 * NOTE: Due to the variable number of array elements in the request and
 *	 result buffers, sizeof() is not reliable. Use the actual
 *	 allocated buffer size, H24x7_DATA_BUFFER_SIZE.
1075 ret = plpar_hcall_norets(H_GET_24X7_DATA,
1076 virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
1077 virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
1080 struct hv_24x7_request *req;
1082 req = &request_buffer->requests[0];
1083 pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
1084 req->performance_domain, req->data_offset,
1085 req->starting_ix, req->starting_lpar_ix,
1086 ret, ret, result_buffer->detailed_rc,
1087 result_buffer->failing_request_ix);
1094 * Add the given @event to the next slot in the 24x7 request_buffer.
 * Note that the H_GET_24X7_DATA hcall allows reading several counters'
1097 * values in a single HCALL. We expect the caller to add events to the
1098 * request buffer one by one, make the HCALL and process the results.
1100 static int add_event_to_24x7_request(struct perf_event *event,
1101 struct hv_24x7_request_buffer *request_buffer)
1105 struct hv_24x7_request *req;
1107 if (request_buffer->num_requests >= MAX_NUM_REQUESTS) {
1108 pr_devel("Too many requests for 24x7 HCALL %d\n",
1109 request_buffer->num_requests);
1113 switch (event_get_domain(event)) {
1114 case HV_PERF_DOMAIN_PHYS_CHIP:
1115 idx = event_get_chip(event);
1117 case HV_PERF_DOMAIN_PHYS_CORE:
1118 idx = event_get_core(event);
1121 idx = event_get_vcpu(event);
1124 i = request_buffer->num_requests++;
1125 req = &request_buffer->requests[i];
1127 req->performance_domain = event_get_domain(event);
1128 req->data_size = cpu_to_be16(8);
1129 req->data_offset = cpu_to_be32(event_get_offset(event));
req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event));
1131 req->max_num_lpars = cpu_to_be16(1);
1132 req->starting_ix = cpu_to_be16(idx);
1133 req->max_ix = cpu_to_be16(1);
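/*
 * The three helpers above are used in a fixed sequence;
 * single_24x7_request() below is the simplest caller:
 *
 *	init_24x7_request(request_buffer, result_buffer);
 *	add_event_to_24x7_request(event, request_buffer);  (one or more)
 *	make_24x7_request(request_buffer, result_buffer);
 *	... then read counts out of result_buffer->results ...
 */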
1138 static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
1142 struct hv_24x7_result *result;
1143 struct hv_24x7_request_buffer *request_buffer;
1144 struct hv_24x7_data_result_buffer *result_buffer;
1146 BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
1147 BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
1149 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1150 result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1152 init_24x7_request(request_buffer, result_buffer);
1154 ret = add_event_to_24x7_request(event, request_buffer);
1158 ret = make_24x7_request(request_buffer, result_buffer);
1162 result = result_buffer->results;
1164 /* This code assumes that a result has only one element. */
1165 num_elements = be16_to_cpu(result->num_elements_returned);
1166 WARN_ON_ONCE(num_elements != 1);
1168 /* process result from hcall */
1169 *count = be64_to_cpu(result->elements[0].element_data[0]);
1172 put_cpu_var(hv_24x7_reqb);
1173 put_cpu_var(hv_24x7_resb);
1178 static int h_24x7_event_init(struct perf_event *event)
1180 struct hv_perf_caps caps;
1186 if (event->attr.type != event->pmu->type)
1189 /* Unused areas must be 0 */
1190 if (event_get_reserved1(event) ||
1191 event_get_reserved2(event) ||
1192 event_get_reserved3(event)) {
1193 pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
1195 event_get_reserved1(event),
1196 event->attr.config1,
1197 event_get_reserved2(event),
1198 event->attr.config2,
1199 event_get_reserved3(event));
1203 /* unsupported modes and filters */
1204 if (event->attr.exclude_user ||
1205 event->attr.exclude_kernel ||
1206 event->attr.exclude_hv ||
1207 event->attr.exclude_idle ||
1208 event->attr.exclude_host ||
1209 event->attr.exclude_guest)
1212 /* no branch sampling */
1213 if (has_branch_stack(event))
/* offset must be 8-byte aligned */
1217 if (event_get_offset(event) % 8) {
1218 pr_devel("bad alignment\n");
1222 /* Domains above 6 are invalid */
1223 domain = event_get_domain(event);
1225 pr_devel("invalid domain %d\n", domain);
1229 hret = hv_perf_caps_get(&caps);
1231 pr_devel("could not get capabilities: rc=%ld\n", hret);
1235 /* Physical domains & other lpars require extra capabilities */
1236 if (!caps.collect_privileged && (is_physical_domain(domain) ||
1237 (event_get_lpar(event) != event_get_lpar_max()))) {
1238 pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
1239 is_physical_domain(domain),
1240 event_get_lpar(event));
1244 /* Get the initial value of the counter for this event */
1245 if (single_24x7_request(event, &ct)) {
1246 pr_devel("test hcall failed\n");
1249 (void)local64_xchg(&event->hw.prev_count, ct);
1254 static u64 h_24x7_get_value(struct perf_event *event)
1258 ret = single_24x7_request(event, &ct);
1260 /* We checked this in event init, shouldn't fail here... */
1266 static void update_event_count(struct perf_event *event, u64 now)
1270 prev = local64_xchg(&event->hw.prev_count, now);
1271 local64_add(now - prev, &event->count);
1274 static void h_24x7_event_read(struct perf_event *event)
1277 struct hv_24x7_request_buffer *request_buffer;
1278 struct hv_24x7_hw *h24x7hw;
1281 txn_flags = __this_cpu_read(hv_24x7_txn_flags);
 * If in a READ transaction, add this counter to the list of
 * counters to read during the next HCALL (i.e. commit_txn()).
1286 * If not in a READ transaction, go ahead and make the HCALL
1287 * to read this counter by itself.
1290 if (txn_flags & PERF_PMU_TXN_READ) {
1294 if (__this_cpu_read(hv_24x7_txn_err))
1297 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1299 ret = add_event_to_24x7_request(event, request_buffer);
1301 __this_cpu_write(hv_24x7_txn_err, ret);
1304 * Associate the event with the HCALL request index,
1305 * so ->commit_txn() can quickly find/update count.
1307 i = request_buffer->num_requests - 1;
1309 h24x7hw = &get_cpu_var(hv_24x7_hw);
1310 h24x7hw->events[i] = event;
1311 put_cpu_var(h24x7hw);
1313 * Clear the event count so we can compute the _change_
1314 * in the 24x7 raw counter value at the end of the txn.
1316 * Note that we could alternatively read the 24x7 value
1317 * now and save its value in event->hw.prev_count. But
1318 * that would require issuing a hcall, which would then
1319 * defeat the purpose of using the txn interface.
1321 local64_set(&event->count, 0);
1324 put_cpu_var(hv_24x7_reqb);
1326 now = h_24x7_get_value(event);
1327 update_event_count(event, now);
1331 static void h_24x7_event_start(struct perf_event *event, int flags)
1333 if (flags & PERF_EF_RELOAD)
1334 local64_set(&event->hw.prev_count, h_24x7_get_value(event));
1337 static void h_24x7_event_stop(struct perf_event *event, int flags)
1339 h_24x7_event_read(event);
1342 static int h_24x7_event_add(struct perf_event *event, int flags)
1344 if (flags & PERF_EF_START)
1345 h_24x7_event_start(event, flags);
 * 24x7 counters only support READ transactions. They are
 * always counting and don't need/support ADD transactions.
1353 * Cache the flags, but otherwise ignore transactions that
1354 * are not PERF_PMU_TXN_READ.
1356 static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
1358 struct hv_24x7_request_buffer *request_buffer;
1359 struct hv_24x7_data_result_buffer *result_buffer;
1361 /* We should not be called if we are already in a txn */
1362 WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags));
1364 __this_cpu_write(hv_24x7_txn_flags, flags);
1365 if (flags & ~PERF_PMU_TXN_READ)
1368 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1369 result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1371 init_24x7_request(request_buffer, result_buffer);
1373 put_cpu_var(hv_24x7_resb);
1374 put_cpu_var(hv_24x7_reqb);
1378 * Clean up transaction state.
1380 * NOTE: Ignore state of request and result buffers for now.
1381 * We will initialize them during the next read/txn.
1383 static void reset_txn(void)
1385 __this_cpu_write(hv_24x7_txn_flags, 0);
1386 __this_cpu_write(hv_24x7_txn_err, 0);
1390 * 24x7 counters only support READ transactions. They are always counting
 * and don't need/support ADD transactions. Clear ->txn_flags but otherwise
1392 * ignore transactions that are not of type PERF_PMU_TXN_READ.
 * For READ transactions, submit all pending 24x7 requests (i.e. requests
1395 * that were queued by h_24x7_event_read()), to the hypervisor and update
1398 static int h_24x7_event_commit_txn(struct pmu *pmu)
1400 struct hv_24x7_request_buffer *request_buffer;
1401 struct hv_24x7_data_result_buffer *result_buffer;
1402 struct hv_24x7_result *res, *next_res;
1404 int i, ret, txn_flags;
1405 struct hv_24x7_hw *h24x7hw;
1407 txn_flags = __this_cpu_read(hv_24x7_txn_flags);
1408 WARN_ON_ONCE(!txn_flags);
1411 if (txn_flags & ~PERF_PMU_TXN_READ)
1414 ret = __this_cpu_read(hv_24x7_txn_err);
1418 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1419 result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1421 ret = make_24x7_request(request_buffer, result_buffer);
1425 h24x7hw = &get_cpu_var(hv_24x7_hw);
1427 /* Go through results in the result buffer to update event counts. */
1428 for (i = 0, res = result_buffer->results;
1429 i < result_buffer->num_results; i++, res = next_res) {
1430 struct perf_event *event = h24x7hw->events[res->result_ix];
1431 u16 num_elements = be16_to_cpu(res->num_elements_returned);
1432 u16 data_size = be16_to_cpu(res->result_element_data_size);
1434 /* This code assumes that a result has only one element. */
1435 WARN_ON_ONCE(num_elements != 1);
1437 count = be64_to_cpu(res->elements[0].element_data[0]);
1438 update_event_count(event, count);
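		/*
		 * With exactly one element per result, the next result
		 * header begins immediately after element 0's data.
		 */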
1440 next_res = (void *) res->elements[0].element_data + data_size;
1443 put_cpu_var(hv_24x7_hw);
1446 put_cpu_var(hv_24x7_resb);
1447 put_cpu_var(hv_24x7_reqb);
1454 * 24x7 counters only support READ transactions. They are always counting
 * and don't need/support ADD transactions. However, regardless of the type
 * of transaction, all we need to do is clean up, so we don't have to check
1457 * the type of transaction.
1459 static void h_24x7_event_cancel_txn(struct pmu *pmu)
1461 WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags));
1465 static struct pmu h_24x7_pmu = {
1466 .task_ctx_nr = perf_invalid_context,
1469 .attr_groups = attr_groups,
1470 .event_init = h_24x7_event_init,
1471 .add = h_24x7_event_add,
1472 .del = h_24x7_event_stop,
1473 .start = h_24x7_event_start,
1474 .stop = h_24x7_event_stop,
1475 .read = h_24x7_event_read,
1476 .start_txn = h_24x7_event_start_txn,
1477 .commit_txn = h_24x7_event_commit_txn,
1478 .cancel_txn = h_24x7_event_cancel_txn,
1481 static int hv_24x7_init(void)
1485 struct hv_perf_caps caps;
1487 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
1488 pr_debug("not a virtualized system, not enabling\n");
1492 hret = hv_perf_caps_get(&caps);
1494 pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
1499 hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
1503 /* sampling not supported */
1504 h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1506 r = create_events_from_catalog(&event_group.attrs,
1507 &event_desc_group.attrs,
1508 &event_long_desc_group.attrs);
1513 r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
1520 device_initcall(hv_24x7_init);