include/trace/trace_events.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *      struct trace_entry              ent;
 *      <type>                          <item>;
 *      <type2>                         <item2>[<len>];
 *      [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
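
/*
 * Illustrative sketch only (not part of the original header), using a
 * hypothetical tracepoint named "foo_bar":
 *
 *   TRACE_EVENT(foo_bar,
 *           TP_PROTO(const char *name, int value),
 *           TP_ARGS(name, value),
 *           TP_STRUCT__entry(
 *                   __string(name, name)
 *                   __field(int, value)
 *           ),
 *           TP_fast_assign(
 *                   __assign_str(name, name);
 *                   __entry->value = value;
 *           ),
 *           TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 *   );
 *
 * After this stage, the TP_STRUCT__entry() part would expand to roughly:
 *
 *   struct trace_event_raw_foo_bar {
 *           struct trace_entry      ent;
 *           u32                     __data_loc_name;
 *           int                     value;
 *           char                    __data[0];
 *   };
 *
 * (__string() is a __dynamic_array(char, ...) underneath, hence the
 * u32 __data_loc_name field rather than an inline character array.)
 */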

#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()                         \
        static const char TRACE_SYSTEM_STRING[] =       \
                __stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)                            \
        static struct trace_enum_map __used __initdata  \
        __##TRACE_SYSTEM##_##a =                        \
        {                                               \
                .system = TRACE_SYSTEM_STRING,          \
                .enum_string = #a,                      \
                .enum_value = a                         \
        };                                              \
        static struct trace_enum_map __used             \
        __attribute__((section("_ftrace_enum_map")))    \
        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events: that is, when all events have the same
 * parameters and just have distinct tracepoints.
 * Each tracepoint can then be defined with DEFINE_EVENT, which
 * maps the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
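
/*
 * Illustrative sketch (hypothetical names): two events with the same
 * prototype can share one class instead of using two TRACE_EVENT()s:
 *
 *   DECLARE_EVENT_CLASS(foo_template,
 *           TP_PROTO(const char *name, int value),
 *           TP_ARGS(name, value),
 *           TP_STRUCT__entry(
 *                   __string(name, name)
 *                   __field(int, value)
 *           ),
 *           TP_fast_assign(
 *                   __assign_str(name, name);
 *                   __entry->value = value;
 *           ),
 *           TP_printk("name=%s value=%d", __get_str(name), __entry->value));
 *
 *   DEFINE_EVENT(foo_template, foo_start,
 *           TP_PROTO(const char *name, int value),
 *           TP_ARGS(name, value));
 *
 *   DEFINE_EVENT(foo_template, foo_end,
 *           TP_PROTO(const char *name, int value),
 *           TP_ARGS(name, value));
 *
 * TRACE_EVENT() is just the degenerate case: it declares a class and a
 * single event of the same name, as the macro below shows.
 */
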
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name,                              \
                             PARAMS(proto),                    \
                             PARAMS(args),                     \
                             PARAMS(tstruct),                  \
                             PARAMS(assign),                   \
                             PARAMS(print));                   \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)             type    item;

#undef __field_ext
#define __field_ext(type, item, filter_type)    type    item;

#undef __field_struct
#define __field_struct(type, item)      type    item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)     type    item;

#undef __array
#define __array(type, item, len)        type    item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)  \
        struct trace_event_raw_##name {                                 \
                struct trace_entry      ent;                            \
                tstruct                                                 \
                char                    __data[0];                      \
        };                                                              \
                                                                        \
        static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)       \
        static struct trace_event_call  __used          \
        __attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)        \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,                      \
                assign, print, reg, unreg)                              \
        TRACE_EVENT(name, PARAMS(proto), PARAMS(args),                  \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)                                  \
        __TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)                            \
        __TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *      u32                             <item1>;
 *      u32                             <item2>;
 *      [...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
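
/*
 * Illustrative sketch (same hypothetical "foo_bar" event as above, which
 * has one __string() field): this stage would emit roughly
 *
 *   struct trace_event_data_offsets_foo_bar {
 *           u32     name;
 *   };
 *
 * i.e. one u32 per dynamic array / string / bitmask, later filled with the
 * offset of that array within the event (low 16 bits) and its length
 * (high 16 bits); plain __field()s contribute nothing here.
 */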

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)        u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
        struct trace_event_data_offsets_##call {                        \
                tstruct;                                                \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

200 /*
201  * Stage 3 of the trace events.
202  *
203  * Override the macros in <trace/trace_events.h> to include the following:
204  *
205  * enum print_line_t
206  * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
207  * {
208  *      struct trace_seq *s = &iter->seq;
209  *      struct trace_event_raw_<call> *field; <-- defined in stage 1
210  *      struct trace_entry *entry;
211  *      struct trace_seq *p = &iter->tmp_seq;
212  *      int ret;
213  *
214  *      entry = iter->ent;
215  *
216  *      if (entry->type != event_<call>->event.type) {
217  *              WARN_ON_ONCE(1);
218  *              return TRACE_TYPE_UNHANDLED;
219  *      }
220  *
221  *      field = (typeof(field))entry;
222  *
223  *      trace_seq_init(p);
224  *      ret = trace_seq_printf(s, "%s: ", <call>);
225  *      if (ret)
226  *              ret = trace_seq_printf(s, <TP_printk> "\n");
227  *      if (!ret)
228  *              return TRACE_TYPE_PARTIAL_LINE;
229  *
230  *      return TRACE_TYPE_HANDLED;
231  * }
232  *
233  * This is the method used to print the raw event to the trace
234  * output format. Note, this is not needed if the data is read
235  * in binary.
236  */
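
/*
 * Illustrative sketch (hypothetical "foo_bar" event as above): with
 * TP_printk("name=%s value=%d", __get_str(name), __entry->value), the
 * generated trace_raw_output_foo_bar() would render the event payload as
 * something like
 *
 *   foo_bar: name=eth0 value=42
 *
 * (after the usual task/CPU/timestamp columns) when the event is read
 * through the "trace" or "trace_pipe" files.
 */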

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)      \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)  \
                ((__entry->__data_loc_##field >> 16) & 0xffff)
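
/*
 * Worked example (hypothetical values): if __data_loc_name contains
 * 0x000a0020, the string starts 0x20 bytes from the beginning of the
 * entry (low 16 bits) and occupies 0x0a bytes (high 16 bits), so
 *
 *   __get_dynamic_array(name)     == (void *)__entry + 0x20
 *   __get_dynamic_array_len(name) == 0x0a
 */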

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field)                                            \
        ({                                                              \
                void *__bitmask = __get_dynamic_array(field);           \
                unsigned int __bitmask_size;                            \
                __bitmask_size = __get_dynamic_array_len(field);        \
                trace_print_bitmask_seq(p, __bitmask, __bitmask_size);  \
        })

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)                       \
        ({                                                              \
                static const struct trace_print_flags __flags[] =       \
                        { flag_array, { -1, NULL }};                    \
                trace_print_flags_seq(p, delim, flag, __flags);         \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)                        \
        ({                                                              \
                static const struct trace_print_flags symbols[] =       \
                        { symbol_array, { -1, NULL }};                  \
                trace_print_symbols_seq(p, value, symbols);             \
        })

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)                    \
        ({                                                              \
                static const struct trace_print_flags_u64 symbols[] =   \
                        { symbol_array, { -1, NULL } };                 \
                trace_print_symbols_seq_u64(p, value, symbols);         \
        })
#else
#define __print_symbolic_u64(value, symbol_array...)                    \
                        __print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)

#undef __print_array
#define __print_array(array, count, el_size)                            \
        ({                                                              \
                BUILD_BUG_ON(el_size != 1 && el_size != 2 &&            \
                             el_size != 4 && el_size != 8);             \
                trace_print_array_seq(p, array, count, el_size);        \
        })

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                        struct trace_event *trace_event)                \
{                                                                       \
        struct trace_seq *s = &iter->seq;                               \
        struct trace_seq __maybe_unused *p = &iter->tmp_seq;            \
        struct trace_event_raw_##call *field;                           \
        int ret;                                                        \
                                                                        \
        field = (typeof(field))iter->ent;                               \
                                                                        \
        ret = trace_raw_output_prep(iter, trace_event);                 \
        if (ret != TRACE_TYPE_HANDLED)                                  \
                return ret;                                             \
                                                                        \
        trace_seq_printf(s, print);                                     \
                                                                        \
        return trace_handle_return(s);                                  \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {   \
        .trace                  = trace_raw_output_##call,              \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                         struct trace_event *event)                     \
{                                                                       \
        struct trace_event_raw_##template *field;                       \
        struct trace_entry *entry;                                      \
        struct trace_seq *p = &iter->tmp_seq;                           \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
        if (entry->type != event_##call.event.type) {                   \
                WARN_ON_ONCE(1);                                        \
                return TRACE_TYPE_UNHANDLED;                            \
        }                                                               \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
        trace_seq_init(p);                                              \
        return trace_output_call(iter, #call, print);                   \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {   \
        .trace                  = trace_raw_output_##call,              \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)                            \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)                     \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 0, filter_type);                       \
        if (ret)                                                        \
                return ret;

#undef __field
#define __field(type, item)     __field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)                                        \
        do {                                                            \
                char *type_str = #type"["__stringify(len)"]";           \
                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
                ret = trace_define_field(event_call, type_str, #item,   \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), FILTER_OTHER);   \
                if (ret)                                                \
                        return ret;                                     \
        } while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                       \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
                                 offsetof(typeof(field), __data_loc_##item),   \
                                 sizeof(field.__data_loc_##item),              \
                                 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
static int notrace __init                                               \
trace_event_define_fields_##call(struct trace_event_call *event_call)   \
{                                                                       \
        struct trace_event_raw_##call field;                            \
        int ret;                                                        \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return ret;                                                     \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __item_length = (len) * sizeof(type);                           \
        __data_offsets->item = __data_size +                            \
                               offsetof(typeof(*entry), __data);        \
        __data_offsets->item |= __item_length << 16;                    \
        __data_size += __item_length;
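
/*
 * Illustrative sketch (hypothetical numbers): for a string that needs 10
 * bytes including its NUL terminator and whose payload happens to start
 * 0x20 bytes into the entry, the code above stores
 * 0x20 | (10 << 16) == 0x000a0020 in __data_offsets->name. That value is
 * later copied into __data_loc_name and decoded by
 * __get_dynamic_array()/__get_dynamic_array_len() in stage 3.
 */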

#undef __string
#define __string(item, src) __dynamic_array(char, item,                 \
                    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (e.g. num_possible_cpus() bits for a cpumask).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)    \
        (((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)                        \
        ((__bitmask_size_in_bytes_raw(nr_bits) +                \
          ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)                                \
        (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
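
/*
 * Worked example (assuming BITS_PER_LONG == 64): for nr_bits == 72,
 *
 *   __bitmask_size_in_bytes_raw(72) ==  9 bytes,
 *   __bitmask_size_in_longs(72)     ==  2 longs,
 *   __bitmask_size_in_bytes(72)     == 16 bytes recorded in the buffer.
 */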

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,   \
                                         __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static inline notrace int trace_event_get_offsets_##call(               \
        struct trace_event_data_offsets_##call *__data_offsets, proto)  \
{                                                                       \
        int __data_size = 0;                                            \
        int __maybe_unused __item_length;                               \
        struct trace_event_raw_##call __maybe_unused *entry;            \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return __data_size;                                             \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *      struct trace_event_file *trace_file = __data;
 *      struct trace_event_call *event_call = trace_file->event_call;
 *      struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *      unsigned long eflags = trace_file->flags;
 *      enum event_trigger_type __tt = ETT_NONE;
 *      struct ring_buffer_event *event;
 *      struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *      struct ring_buffer *buffer;
 *      unsigned long irq_flags;
 *      int __data_size;
 *      int pc;
 *
 *      if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *              if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *                      event_triggers_call(trace_file, NULL);
 *              if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *                      return;
 *      }
 *
 *      local_save_flags(irq_flags);
 *      pc = preempt_count();
 *
 *      __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *      event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *                                event_<call>->event.type,
 *                                sizeof(*entry) + __data_size,
 *                                irq_flags, pc);
 *      if (!event)
 *              return;
 *      entry   = ring_buffer_event_data(event);
 *
 *      { <assign>; }  <-- Here we assign the entries defined by the
 *                         __field and __array macros.
 *
 *      if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *              __tt = event_triggers_call(trace_file, entry);
 *
 *      if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *                   &trace_file->flags))
 *              ring_buffer_discard_commit(buffer, event);
 *      else if (!filter_check_discard(trace_file, entry, buffer, event))
 *              trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *      if (__tt)
 *              event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *      .trace                  = trace_raw_output_<call>, <-- stage 3
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *      .system                 = "<system>",
 *      .define_fields          = trace_event_define_fields_<call>,
 *      .fields                 = LIST_HEAD_INIT(event_class_##call.fields),
 *      .raw_init               = trace_event_raw_init,
 *      .probe                  = trace_event_raw_event_##call,
 *      .reg                    = trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *      .class                  = event_class_<template>,
 *      {
 *              .tp                     = &__tracepoint_<call>,
 *      },
 *      .event                  = &ftrace_event_type_<call>,
 *      .print_fmt              = print_fmt_<call>,
 *      .flags                  = TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)                                  \
        static notrace void                                             \
        perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)                                          \
        .perf_probe             = perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)                                          \
        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)                                     \
        memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_addr
#define __perf_addr(a)  (a)

#undef __perf_count
#define __perf_count(c) (c)

#undef __perf_task
#define __perf_task(t)  (t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
                                                                        \
static notrace void                                                     \
trace_event_raw_event_##call(void *__data, proto)                       \
{                                                                       \
        struct trace_event_file *trace_file = __data;                   \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
        struct trace_event_buffer fbuffer;                              \
        struct trace_event_raw_##call *entry;                           \
        int __data_size;                                                \
                                                                        \
        if (trace_trigger_soft_disabled(trace_file))                    \
                return;                                                 \
                                                                        \
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        entry = trace_event_buffer_reserve(&fbuffer, trace_file,        \
                                 sizeof(*entry) + __data_size);         \
                                                                        \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        trace_event_buffer_commit(&fbuffer);                            \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
static inline void ftrace_test_probe_##call(void)                       \
{                                                                       \
        check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
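
/*
 * Illustrative sketch (hypothetical "foo_bar" event as above): after this
 * redefinition, TP_printk("name=%s value=%d", __get_str(name),
 * __entry->value) turns into the C string
 *
 *   "\"name=%s value=%d\", __get_str(name), REC->value"
 *
 * which becomes print_fmt_foo_bar below and is what user space sees as
 * the "print fmt:" line of the event's "format" file.
 */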

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
_TRACE_PERF_PROTO(call, PARAMS(proto));                                 \
static char print_fmt_##call[] = print;                                 \
static struct trace_event_class __used __refdata event_class_##call = { \
        .system                 = TRACE_SYSTEM_STRING,                  \
        .define_fields          = trace_event_define_fields_##call,     \
        .fields                 = LIST_HEAD_INIT(event_class_##call.fields),\
        .raw_init               = trace_event_raw_init,                 \
        .probe                  = trace_event_raw_event_##call,         \
        .reg                    = trace_event_reg,                      \
        _TRACE_PERF_INIT(call)                                          \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##template,   \
        .print_fmt              = print_fmt_##template,                 \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
                                                                        \
static char print_fmt_##call[] = print;                                 \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##call,       \
        .print_fmt              = print_fmt_##call,                     \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)