Diffstat (limited to 'kernel/include/trace')
71 files changed, 20267 insertions, 0 deletions
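The first file below, define_trace.h, documents the contract every other header in this diff follows: a trace header is guarded by TRACE_HEADER_MULTI_READ so it can be read more than once, ends by including <trace/define_trace.h>, and exactly one .c file defines CREATE_TRACE_POINTS before including the header so the tracepoints are instantiated exactly once. As orientation, a minimal sketch of that pattern for a hypothetical "frob" subsystem (not part of this diff):

/* include/trace/events/frob.h -- hypothetical example */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM frob

#if !defined(_TRACE_FROB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FROB_H

#include <linux/tracepoint.h>

TRACE_EVENT(frob_start,
	TP_PROTO(int unit),
	TP_ARGS(unit),
	TP_STRUCT__entry(
		__field(	int,	unit	)
	),
	TP_fast_assign(
		__entry->unit = unit;
	),
	TP_printk("unit=%d", __entry->unit)
);

#endif /* _TRACE_FROB_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

/* frob.c -- the one compilation unit that instantiates the tracepoints */
#define CREATE_TRACE_POINTS
#include <trace/events/frob.h>

Because TRACE_INCLUDE_FILE and TRACE_INCLUDE_PATH are left undefined, define_trace.h re-includes <trace/events/frob.h> itself (TRACE_SYSTEM doubles as the file name), which is why that include must sit outside the multi-read guard; callers elsewhere simply include the header and call trace_frob_start(n).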
diff --git a/kernel/include/trace/define_trace.h b/kernel/include/trace/define_trace.h new file mode 100644 index 000000000..02e100356 --- /dev/null +++ b/kernel/include/trace/define_trace.h @@ -0,0 +1,118 @@ +/* + * Trace files that want to automate creation of all tracepoints defined + * in their file should include this file. The following are macros that the + * trace file may define: + * + * TRACE_SYSTEM defines the system the tracepoint is for + * + * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h + * This macro may be defined to tell define_trace.h what file to include. + * Note, leave off the ".h". + * + * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace + * then this macro can define the path to use. Note, the path is relative to + * define_trace.h, not the file including it. Full path names for out of tree + * modules must be used. + */ + +#ifdef CREATE_TRACE_POINTS + +/* Prevent recursion */ +#undef CREATE_TRACE_POINTS + +#include <linux/stringify.h> + +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ + DEFINE_TRACE(name) + +#undef TRACE_EVENT_CONDITION +#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \ + TRACE_EVENT(name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(tstruct), \ + PARAMS(assign), \ + PARAMS(print)) + +#undef TRACE_EVENT_FN +#define TRACE_EVENT_FN(name, proto, args, tstruct, \ + assign, print, reg, unreg) \ + DEFINE_TRACE_FN(name, reg, unreg) + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ + DEFINE_TRACE(name) + +#undef DEFINE_EVENT_FN +#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \ + DEFINE_TRACE_FN(name, reg, unreg) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_TRACE(name) + +#undef DEFINE_EVENT_CONDITION +#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#undef DECLARE_TRACE +#define DECLARE_TRACE(name, proto, args) \ + DEFINE_TRACE(name) + +#undef TRACE_INCLUDE +#undef __TRACE_INCLUDE + +#ifndef TRACE_INCLUDE_FILE +# define TRACE_INCLUDE_FILE TRACE_SYSTEM +# define UNDEF_TRACE_INCLUDE_FILE +#endif + +#ifndef TRACE_INCLUDE_PATH +# define __TRACE_INCLUDE(system) <trace/events/system.h> +# define UNDEF_TRACE_INCLUDE_PATH +#else +# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h) +#endif + +# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system) + +/* Let the trace headers be reread */ +#define TRACE_HEADER_MULTI_READ + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Make all open coded DECLARE_TRACE nops */ +#undef DECLARE_TRACE +#define DECLARE_TRACE(name, proto, args) + +#ifdef CONFIG_EVENT_TRACING +#include <trace/ftrace.h> +#endif + +#undef TRACE_EVENT +#undef TRACE_EVENT_FN +#undef TRACE_EVENT_CONDITION +#undef DECLARE_EVENT_CLASS +#undef DEFINE_EVENT +#undef DEFINE_EVENT_FN +#undef DEFINE_EVENT_PRINT +#undef DEFINE_EVENT_CONDITION +#undef TRACE_HEADER_MULTI_READ +#undef DECLARE_TRACE + +/* Only undef what we defined in this file */ +#ifdef UNDEF_TRACE_INCLUDE_FILE +# undef TRACE_INCLUDE_FILE +# undef UNDEF_TRACE_INCLUDE_FILE +#endif + +#ifdef UNDEF_TRACE_INCLUDE_PATH +# undef TRACE_INCLUDE_PATH +# undef UNDEF_TRACE_INCLUDE_PATH +#endif + +/* We may be processing more files */ +#define CREATE_TRACE_POINTS + +#endif /* CREATE_TRACE_POINTS */ diff --git a/kernel/include/trace/events/9p.h b/kernel/include/trace/events/9p.h 
new file mode 100644 index 000000000..633ee9ee9 --- /dev/null +++ b/kernel/include/trace/events/9p.h @@ -0,0 +1,173 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM 9p + +#if !defined(_TRACE_9P_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_9P_H + +#include <linux/tracepoint.h> + +#define P9_MSG_T \ + EM( P9_TLERROR, "P9_TLERROR" ) \ + EM( P9_RLERROR, "P9_RLERROR" ) \ + EM( P9_TSTATFS, "P9_TSTATFS" ) \ + EM( P9_RSTATFS, "P9_RSTATFS" ) \ + EM( P9_TLOPEN, "P9_TLOPEN" ) \ + EM( P9_RLOPEN, "P9_RLOPEN" ) \ + EM( P9_TLCREATE, "P9_TLCREATE" ) \ + EM( P9_RLCREATE, "P9_RLCREATE" ) \ + EM( P9_TSYMLINK, "P9_TSYMLINK" ) \ + EM( P9_RSYMLINK, "P9_RSYMLINK" ) \ + EM( P9_TMKNOD, "P9_TMKNOD" ) \ + EM( P9_RMKNOD, "P9_RMKNOD" ) \ + EM( P9_TRENAME, "P9_TRENAME" ) \ + EM( P9_RRENAME, "P9_RRENAME" ) \ + EM( P9_TREADLINK, "P9_TREADLINK" ) \ + EM( P9_RREADLINK, "P9_RREADLINK" ) \ + EM( P9_TGETATTR, "P9_TGETATTR" ) \ + EM( P9_RGETATTR, "P9_RGETATTR" ) \ + EM( P9_TSETATTR, "P9_TSETATTR" ) \ + EM( P9_RSETATTR, "P9_RSETATTR" ) \ + EM( P9_TXATTRWALK, "P9_TXATTRWALK" ) \ + EM( P9_RXATTRWALK, "P9_RXATTRWALK" ) \ + EM( P9_TXATTRCREATE, "P9_TXATTRCREATE" ) \ + EM( P9_RXATTRCREATE, "P9_RXATTRCREATE" ) \ + EM( P9_TREADDIR, "P9_TREADDIR" ) \ + EM( P9_RREADDIR, "P9_RREADDIR" ) \ + EM( P9_TFSYNC, "P9_TFSYNC" ) \ + EM( P9_RFSYNC, "P9_RFSYNC" ) \ + EM( P9_TLOCK, "P9_TLOCK" ) \ + EM( P9_RLOCK, "P9_RLOCK" ) \ + EM( P9_TGETLOCK, "P9_TGETLOCK" ) \ + EM( P9_RGETLOCK, "P9_RGETLOCK" ) \ + EM( P9_TLINK, "P9_TLINK" ) \ + EM( P9_RLINK, "P9_RLINK" ) \ + EM( P9_TMKDIR, "P9_TMKDIR" ) \ + EM( P9_RMKDIR, "P9_RMKDIR" ) \ + EM( P9_TRENAMEAT, "P9_TRENAMEAT" ) \ + EM( P9_RRENAMEAT, "P9_RRENAMEAT" ) \ + EM( P9_TUNLINKAT, "P9_TUNLINKAT" ) \ + EM( P9_RUNLINKAT, "P9_RUNLINKAT" ) \ + EM( P9_TVERSION, "P9_TVERSION" ) \ + EM( P9_RVERSION, "P9_RVERSION" ) \ + EM( P9_TAUTH, "P9_TAUTH" ) \ + EM( P9_RAUTH, "P9_RAUTH" ) \ + EM( P9_TATTACH, "P9_TATTACH" ) \ + EM( P9_RATTACH, "P9_RATTACH" ) \ + EM( P9_TERROR, "P9_TERROR" ) \ + EM( P9_RERROR, "P9_RERROR" ) \ + EM( P9_TFLUSH, "P9_TFLUSH" ) \ + EM( P9_RFLUSH, "P9_RFLUSH" ) \ + EM( P9_TWALK, "P9_TWALK" ) \ + EM( P9_RWALK, "P9_RWALK" ) \ + EM( P9_TOPEN, "P9_TOPEN" ) \ + EM( P9_ROPEN, "P9_ROPEN" ) \ + EM( P9_TCREATE, "P9_TCREATE" ) \ + EM( P9_RCREATE, "P9_RCREATE" ) \ + EM( P9_TREAD, "P9_TREAD" ) \ + EM( P9_RREAD, "P9_RREAD" ) \ + EM( P9_TWRITE, "P9_TWRITE" ) \ + EM( P9_RWRITE, "P9_RWRITE" ) \ + EM( P9_TCLUNK, "P9_TCLUNK" ) \ + EM( P9_RCLUNK, "P9_RCLUNK" ) \ + EM( P9_TREMOVE, "P9_TREMOVE" ) \ + EM( P9_RREMOVE, "P9_RREMOVE" ) \ + EM( P9_TSTAT, "P9_TSTAT" ) \ + EM( P9_RSTAT, "P9_RSTAT" ) \ + EM( P9_TWSTAT, "P9_TWSTAT" ) \ + EMe(P9_RWSTAT, "P9_RWSTAT" ) + +/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +P9_MSG_T + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. 
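+ * Each EM()/EMe() pair then expands to one { value, "name" } entry in the
+ * __print_symbolic() table that show_9p_op() hands to TP_printk().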
+ */ +#undef EM +#undef EMe +#define EM(a, b) { a, b }, +#define EMe(a, b) { a, b } + +#define show_9p_op(type) \ + __print_symbolic(type, P9_MSG_T) + +TRACE_EVENT(9p_client_req, + TP_PROTO(struct p9_client *clnt, int8_t type, int tag), + + TP_ARGS(clnt, type, tag), + + TP_STRUCT__entry( + __field( void *, clnt ) + __field( __u8, type ) + __field( __u32, tag ) + ), + + TP_fast_assign( + __entry->clnt = clnt; + __entry->type = type; + __entry->tag = tag; + ), + + TP_printk("client %lu request %s tag %d", + (long)__entry->clnt, show_9p_op(__entry->type), + __entry->tag) + ); + +TRACE_EVENT(9p_client_res, + TP_PROTO(struct p9_client *clnt, int8_t type, int tag, int err), + + TP_ARGS(clnt, type, tag, err), + + TP_STRUCT__entry( + __field( void *, clnt ) + __field( __u8, type ) + __field( __u32, tag ) + __field( __u32, err ) + ), + + TP_fast_assign( + __entry->clnt = clnt; + __entry->type = type; + __entry->tag = tag; + __entry->err = err; + ), + + TP_printk("client %lu response %s tag %d err %d", + (long)__entry->clnt, show_9p_op(__entry->type), + __entry->tag, __entry->err) +); + +/* dump 32 bytes of protocol data */ +#define P9_PROTO_DUMP_SZ 32 +TRACE_EVENT(9p_protocol_dump, + TP_PROTO(struct p9_client *clnt, struct p9_fcall *pdu), + + TP_ARGS(clnt, pdu), + + TP_STRUCT__entry( + __field( void *, clnt ) + __field( __u8, type ) + __field( __u16, tag ) + __array( unsigned char, line, P9_PROTO_DUMP_SZ ) + ), + + TP_fast_assign( + __entry->clnt = clnt; + __entry->type = pdu->id; + __entry->tag = pdu->tag; + memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ); + ), + TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n", + (unsigned long)__entry->clnt, show_9p_op(__entry->type), + __entry->tag, 0, __entry->line, 16, __entry->line + 16) + ); + +#endif /* _TRACE_9P_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/asoc.h b/kernel/include/trace/events/asoc.h new file mode 100644 index 000000000..88cf39d96 --- /dev/null +++ b/kernel/include/trace/events/asoc.h @@ -0,0 +1,294 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM asoc + +#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ASOC_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +#define DAPM_DIRECT "(direct)" + +struct snd_soc_jack; +struct snd_soc_codec; +struct snd_soc_card; +struct snd_soc_dapm_widget; +struct snd_soc_dapm_path; + +DECLARE_EVENT_CLASS(snd_soc_card, + + TP_PROTO(struct snd_soc_card *card, int val), + + TP_ARGS(card, val), + + TP_STRUCT__entry( + __string( name, card->name ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, card->name); + __entry->val = val; + ), + + TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val) +); + +DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start, + + TP_PROTO(struct snd_soc_card *card, int val), + + TP_ARGS(card, val) + +); + +DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done, + + TP_PROTO(struct snd_soc_card *card, int val), + + TP_ARGS(card, val) + +); + +DECLARE_EVENT_CLASS(snd_soc_dapm_basic, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card), + + TP_STRUCT__entry( + __string( name, card->name ) + ), + + TP_fast_assign( + __assign_str(name, card->name); + ), + + TP_printk("card=%s", __get_str(name)) +); + +DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card) + +); + +DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card) 
+ +); + +DECLARE_EVENT_CLASS(snd_soc_dapm_widget, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val), + + TP_STRUCT__entry( + __string( name, w->name ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, w->name); + __entry->val = val; + ), + + TP_printk("widget=%s val=%d", __get_str(name), + (int)__entry->val) +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val) + +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val) + +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done, + + TP_PROTO(struct snd_soc_dapm_widget *w, int val), + + TP_ARGS(w, val) + +); + +TRACE_EVENT(snd_soc_dapm_walk_done, + + TP_PROTO(struct snd_soc_card *card), + + TP_ARGS(card), + + TP_STRUCT__entry( + __string( name, card->name ) + __field( int, power_checks ) + __field( int, path_checks ) + __field( int, neighbour_checks ) + ), + + TP_fast_assign( + __assign_str(name, card->name); + __entry->power_checks = card->dapm_stats.power_checks; + __entry->path_checks = card->dapm_stats.path_checks; + __entry->neighbour_checks = card->dapm_stats.neighbour_checks; + ), + + TP_printk("%s: checks %d power, %d path, %d neighbour", + __get_str(name), (int)__entry->power_checks, + (int)__entry->path_checks, (int)__entry->neighbour_checks) +); + +TRACE_EVENT(snd_soc_dapm_output_path, + + TP_PROTO(struct snd_soc_dapm_widget *widget, + struct snd_soc_dapm_path *path), + + TP_ARGS(widget, path), + + TP_STRUCT__entry( + __string( wname, widget->name ) + __string( pname, path->name ? path->name : DAPM_DIRECT) + __string( psname, path->sink->name ) + __field( int, path_sink ) + __field( int, path_connect ) + ), + + TP_fast_assign( + __assign_str(wname, widget->name); + __assign_str(pname, path->name ? path->name : DAPM_DIRECT); + __assign_str(psname, path->sink->name); + __entry->path_connect = path->connect; + __entry->path_sink = (long)path->sink; + ), + + TP_printk("%c%s -> %s -> %s", + (int) __entry->path_sink && + (int) __entry->path_connect ? '*' : ' ', + __get_str(wname), __get_str(pname), __get_str(psname)) +); + +TRACE_EVENT(snd_soc_dapm_input_path, + + TP_PROTO(struct snd_soc_dapm_widget *widget, + struct snd_soc_dapm_path *path), + + TP_ARGS(widget, path), + + TP_STRUCT__entry( + __string( wname, widget->name ) + __string( pname, path->name ? path->name : DAPM_DIRECT) + __string( psname, path->source->name ) + __field( int, path_source ) + __field( int, path_connect ) + ), + + TP_fast_assign( + __assign_str(wname, widget->name); + __assign_str(pname, path->name ? path->name : DAPM_DIRECT); + __assign_str(psname, path->source->name); + __entry->path_connect = path->connect; + __entry->path_source = (long)path->source; + ), + + TP_printk("%c%s <- %s <- %s", + (int) __entry->path_source && + (int) __entry->path_connect ? '*' : ' ', + __get_str(wname), __get_str(pname), __get_str(psname)) +); + +TRACE_EVENT(snd_soc_dapm_connected, + + TP_PROTO(int paths, int stream), + + TP_ARGS(paths, stream), + + TP_STRUCT__entry( + __field( int, paths ) + __field( int, stream ) + ), + + TP_fast_assign( + __entry->paths = paths; + __entry->stream = stream; + ), + + TP_printk("%s: found %d paths", + __entry->stream ? 
"capture" : "playback", __entry->paths) +); + +TRACE_EVENT(snd_soc_jack_irq, + + TP_PROTO(const char *name), + + TP_ARGS(name), + + TP_STRUCT__entry( + __string( name, name ) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("%s", __get_str(name)) +); + +TRACE_EVENT(snd_soc_jack_report, + + TP_PROTO(struct snd_soc_jack *jack, int mask, int val), + + TP_ARGS(jack, mask, val), + + TP_STRUCT__entry( + __string( name, jack->jack->name ) + __field( int, mask ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, jack->jack->name); + __entry->mask = mask; + __entry->val = val; + ), + + TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val, + (int)__entry->mask) +); + +TRACE_EVENT(snd_soc_jack_notify, + + TP_PROTO(struct snd_soc_jack *jack, int val), + + TP_ARGS(jack, val), + + TP_STRUCT__entry( + __string( name, jack->jack->name ) + __field( int, val ) + ), + + TP_fast_assign( + __assign_str(name, jack->jack->name); + __entry->val = val; + ), + + TP_printk("jack=%s %x", __get_str(name), (int)__entry->val) +); + +#endif /* _TRACE_ASOC_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/bcache.h b/kernel/include/trace/events/bcache.h new file mode 100644 index 000000000..981acf74b --- /dev/null +++ b/kernel/include/trace/events/bcache.h @@ -0,0 +1,482 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bcache + +#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BCACHE_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(bcache_request, + TP_PROTO(struct bcache_device *d, struct bio *bio), + TP_ARGS(d, bio), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(unsigned int, orig_major ) + __field(unsigned int, orig_minor ) + __field(sector_t, sector ) + __field(dev_t, orig_sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->orig_major = d->disk->major; + __entry->orig_minor = d->disk->first_minor; + __entry->sector = bio->bi_iter.bi_sector; + __entry->orig_sector = bio->bi_iter.bi_sector - 16; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + ), + + TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->orig_major, __entry->orig_minor, + (unsigned long long)__entry->orig_sector) +); + +DECLARE_EVENT_CLASS(bkey, + TP_PROTO(struct bkey *k), + TP_ARGS(k), + + TP_STRUCT__entry( + __field(u32, size ) + __field(u32, inode ) + __field(u64, offset ) + __field(bool, dirty ) + ), + + TP_fast_assign( + __entry->inode = KEY_INODE(k); + __entry->offset = KEY_OFFSET(k); + __entry->size = KEY_SIZE(k); + __entry->dirty = KEY_DIRTY(k); + ), + + TP_printk("%u:%llu len %u dirty %u", __entry->inode, + __entry->offset, __entry->size, __entry->dirty) +); + +DECLARE_EVENT_CLASS(btree_node, + TP_PROTO(struct btree *b), + TP_ARGS(b), + + TP_STRUCT__entry( + __field(size_t, bucket ) + ), + + TP_fast_assign( + __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); + ), + + TP_printk("bucket %zu", __entry->bucket) +); + +/* request.c */ + +DEFINE_EVENT(bcache_request, bcache_request_start, + TP_PROTO(struct bcache_device *d, struct bio *bio), + TP_ARGS(d, bio) +); + +DEFINE_EVENT(bcache_request, bcache_request_end, + TP_PROTO(struct bcache_device *d, struct bio *bio), + TP_ARGS(d, bio) +); + 
+DECLARE_EVENT_CLASS(bcache_bio, + TP_PROTO(struct bio *bio), + TP_ARGS(bio), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(sector_t, sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + ), + + TP_printk("%d,%d %s %llu + %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, __entry->nr_sector) +); + +DEFINE_EVENT(bcache_bio, bcache_bypass_sequential, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +DEFINE_EVENT(bcache_bio, bcache_bypass_congested, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +TRACE_EVENT(bcache_read, + TP_PROTO(struct bio *bio, bool hit, bool bypass), + TP_ARGS(bio, hit, bypass), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(sector_t, sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + __field(bool, cache_hit ) + __field(bool, bypass ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + __entry->cache_hit = hit; + __entry->bypass = bypass; + ), + + TP_printk("%d,%d %s %llu + %u hit %u bypass %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->cache_hit, __entry->bypass) +); + +TRACE_EVENT(bcache_write, + TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio, + bool writeback, bool bypass), + TP_ARGS(c, inode, bio, writeback, bypass), + + TP_STRUCT__entry( + __array(char, uuid, 16 ) + __field(u64, inode ) + __field(sector_t, sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + __field(bool, writeback ) + __field(bool, bypass ) + ), + + TP_fast_assign( + memcpy(__entry->uuid, c->sb.set_uuid, 16); + __entry->inode = inode; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + __entry->writeback = writeback; + __entry->bypass = bypass; + ), + + TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u", + __entry->uuid, __entry->inode, + __entry->rwbs, (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->writeback, __entry->bypass) +); + +DEFINE_EVENT(bcache_bio, bcache_read_retry, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +DEFINE_EVENT(bkey, bcache_cache_insert, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); + +/* Journal */ + +DECLARE_EVENT_CLASS(cache_set, + TP_PROTO(struct cache_set *c), + TP_ARGS(c), + + TP_STRUCT__entry( + __array(char, uuid, 16 ) + ), + + TP_fast_assign( + memcpy(__entry->uuid, c->sb.set_uuid, 16); + ), + + TP_printk("%pU", __entry->uuid) +); + +DEFINE_EVENT(bkey, bcache_journal_replay_key, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); + +DEFINE_EVENT(cache_set, bcache_journal_full, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); + +DEFINE_EVENT(cache_set, bcache_journal_entry_full, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); + +DEFINE_EVENT(bcache_bio, bcache_journal_write, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +/* Btree */ + +DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); + +DEFINE_EVENT(btree_node, bcache_btree_read, + TP_PROTO(struct btree 
*b), + TP_ARGS(b) +); + +TRACE_EVENT(bcache_btree_write, + TP_PROTO(struct btree *b), + TP_ARGS(b), + + TP_STRUCT__entry( + __field(size_t, bucket ) + __field(unsigned, block ) + __field(unsigned, keys ) + ), + + TP_fast_assign( + __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); + __entry->block = b->written; + __entry->keys = b->keys.set[b->keys.nsets].data->keys; + ), + + TP_printk("bucket %zu", __entry->bucket) +); + +DEFINE_EVENT(btree_node, bcache_btree_node_alloc, + TP_PROTO(struct btree *b), + TP_ARGS(b) +); + +DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); + +DEFINE_EVENT(btree_node, bcache_btree_node_free, + TP_PROTO(struct btree *b), + TP_ARGS(b) +); + +TRACE_EVENT(bcache_btree_gc_coalesce, + TP_PROTO(unsigned nodes), + TP_ARGS(nodes), + + TP_STRUCT__entry( + __field(unsigned, nodes ) + ), + + TP_fast_assign( + __entry->nodes = nodes; + ), + + TP_printk("coalesced %u nodes", __entry->nodes) +); + +DEFINE_EVENT(cache_set, bcache_gc_start, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); + +DEFINE_EVENT(cache_set, bcache_gc_end, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); + +DEFINE_EVENT(bkey, bcache_gc_copy, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); + +DEFINE_EVENT(bkey, bcache_gc_copy_collision, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); + +TRACE_EVENT(bcache_btree_insert_key, + TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status), + TP_ARGS(b, k, op, status), + + TP_STRUCT__entry( + __field(u64, btree_node ) + __field(u32, btree_level ) + __field(u32, inode ) + __field(u64, offset ) + __field(u32, size ) + __field(u8, dirty ) + __field(u8, op ) + __field(u8, status ) + ), + + TP_fast_assign( + __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0); + __entry->btree_level = b->level; + __entry->inode = KEY_INODE(k); + __entry->offset = KEY_OFFSET(k); + __entry->size = KEY_SIZE(k); + __entry->dirty = KEY_DIRTY(k); + __entry->op = op; + __entry->status = status; + ), + + TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u", + __entry->status, __entry->op, + __entry->btree_node, __entry->btree_level, + __entry->inode, __entry->offset, + __entry->size, __entry->dirty) +); + +DECLARE_EVENT_CLASS(btree_split, + TP_PROTO(struct btree *b, unsigned keys), + TP_ARGS(b, keys), + + TP_STRUCT__entry( + __field(size_t, bucket ) + __field(unsigned, keys ) + ), + + TP_fast_assign( + __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); + __entry->keys = keys; + ), + + TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys) +); + +DEFINE_EVENT(btree_split, bcache_btree_node_split, + TP_PROTO(struct btree *b, unsigned keys), + TP_ARGS(b, keys) +); + +DEFINE_EVENT(btree_split, bcache_btree_node_compact, + TP_PROTO(struct btree *b, unsigned keys), + TP_ARGS(b, keys) +); + +DEFINE_EVENT(btree_node, bcache_btree_set_root, + TP_PROTO(struct btree *b), + TP_ARGS(b) +); + +TRACE_EVENT(bcache_keyscan, + TP_PROTO(unsigned nr_found, + unsigned start_inode, uint64_t start_offset, + unsigned end_inode, uint64_t end_offset), + TP_ARGS(nr_found, + start_inode, start_offset, + end_inode, end_offset), + + TP_STRUCT__entry( + __field(__u32, nr_found ) + __field(__u32, start_inode ) + __field(__u64, start_offset ) + __field(__u32, end_inode ) + __field(__u64, end_offset ) + ), + + TP_fast_assign( + __entry->nr_found = nr_found; + __entry->start_inode = start_inode; + __entry->start_offset = start_offset; + __entry->end_inode = end_inode; + __entry->end_offset = end_offset; + ), + + TP_printk("found %u keys from 
%u:%llu to %u:%llu", __entry->nr_found, + __entry->start_inode, __entry->start_offset, + __entry->end_inode, __entry->end_offset) +); + +/* Allocator */ + +TRACE_EVENT(bcache_invalidate, + TP_PROTO(struct cache *ca, size_t bucket), + TP_ARGS(ca, bucket), + + TP_STRUCT__entry( + __field(unsigned, sectors ) + __field(dev_t, dev ) + __field(__u64, offset ) + ), + + TP_fast_assign( + __entry->dev = ca->bdev->bd_dev; + __entry->offset = bucket << ca->set->bucket_bits; + __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]); + ), + + TP_printk("invalidated %u sectors at %d,%d sector=%llu", + __entry->sectors, MAJOR(__entry->dev), + MINOR(__entry->dev), __entry->offset) +); + +TRACE_EVENT(bcache_alloc, + TP_PROTO(struct cache *ca, size_t bucket), + TP_ARGS(ca, bucket), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(__u64, offset ) + ), + + TP_fast_assign( + __entry->dev = ca->bdev->bd_dev; + __entry->offset = bucket << ca->set->bucket_bits; + ), + + TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev), + MINOR(__entry->dev), __entry->offset) +); + +TRACE_EVENT(bcache_alloc_fail, + TP_PROTO(struct cache *ca, unsigned reserve), + TP_ARGS(ca, reserve), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(unsigned, free ) + __field(unsigned, free_inc ) + __field(unsigned, blocked ) + ), + + TP_fast_assign( + __entry->dev = ca->bdev->bd_dev; + __entry->free = fifo_used(&ca->free[reserve]); + __entry->free_inc = fifo_used(&ca->free_inc); + __entry->blocked = atomic_read(&ca->set->prio_blocked); + ), + + TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free, + __entry->free_inc, __entry->blocked) +); + +/* Background writeback */ + +DEFINE_EVENT(bkey, bcache_writeback, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); + +DEFINE_EVENT(bkey, bcache_writeback_collision, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); + +#endif /* _TRACE_BCACHE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/block.h b/kernel/include/trace/events/block.h new file mode 100644 index 000000000..e8a5eca1d --- /dev/null +++ b/kernel/include/trace/events/block.h @@ -0,0 +1,674 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM block + +#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BLOCK_H + +#include <linux/blktrace_api.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> +#include <linux/tracepoint.h> + +#define RWBS_LEN 8 + +DECLARE_EVENT_CLASS(block_buffer, + + TP_PROTO(struct buffer_head *bh), + + TP_ARGS(bh), + + TP_STRUCT__entry ( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( size_t, size ) + ), + + TP_fast_assign( + __entry->dev = bh->b_bdev->bd_dev; + __entry->sector = bh->b_blocknr; + __entry->size = bh->b_size; + ), + + TP_printk("%d,%d sector=%llu size=%zu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->sector, __entry->size + ) +); + +/** + * block_touch_buffer - mark a buffer accessed + * @bh: buffer_head being touched + * + * Called from touch_buffer(). + */ +DEFINE_EVENT(block_buffer, block_touch_buffer, + + TP_PROTO(struct buffer_head *bh), + + TP_ARGS(bh) +); + +/** + * block_dirty_buffer - mark a buffer dirty + * @bh: buffer_head being dirtied + * + * Called from mark_buffer_dirty(). 
+ */ +DEFINE_EVENT(block_buffer, block_dirty_buffer, + + TP_PROTO(struct buffer_head *bh), + + TP_ARGS(bh) +); + +DECLARE_EVENT_CLASS(block_rq_with_error, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( int, errors ) + __array( char, rwbs, RWBS_LEN ) + __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) + ), + + TP_fast_assign( + __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_pos(rq); + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_sectors(rq); + __entry->errors = rq->errors; + + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_dump_cmd(__get_str(cmd), rq); + ), + + TP_printk("%d,%d %s (%s) %llu + %u [%d]", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, __get_str(cmd), + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->errors) +); + +/** + * block_rq_abort - abort block operation request + * @q: queue containing the block operation request + * @rq: block IO operation request + * + * Called immediately after pending block IO operation request @rq in + * queue @q is aborted. The fields in the operation request @rq + * can be examined to determine which device and sectors the pending + * operation would access. + */ +DEFINE_EVENT(block_rq_with_error, block_rq_abort, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +/** + * block_rq_requeue - place block IO request back on a queue + * @q: queue holding operation + * @rq: block IO operation request + * + * The block operation request @rq is being placed back into queue + * @q. For some reason the request was not completed and needs to be + * put back in the queue. + */ +DEFINE_EVENT(block_rq_with_error, block_rq_requeue, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +/** + * block_rq_complete - block IO operation completed by device driver + * @q: queue containing the block operation request + * @rq: block operations request + * @nr_bytes: number of completed bytes + * + * The block_rq_complete tracepoint event indicates that some portion + * of operation request has been completed by the device driver. If + * the @rq->bio is %NULL, then there is absolutely no additional work to + * do for the request. If @rq->bio is non-NULL then there is + * additional work required to complete the request. + */ +TRACE_EVENT(block_rq_complete, + + TP_PROTO(struct request_queue *q, struct request *rq, + unsigned int nr_bytes), + + TP_ARGS(q, rq, nr_bytes), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( int, errors ) + __array( char, rwbs, RWBS_LEN ) + __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) + ), + + TP_fast_assign( + __entry->dev = rq->rq_disk ? 
disk_devt(rq->rq_disk) : 0; + __entry->sector = blk_rq_pos(rq); + __entry->nr_sector = nr_bytes >> 9; + __entry->errors = rq->errors; + + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); + blk_dump_cmd(__get_str(cmd), rq); + ), + + TP_printk("%d,%d %s (%s) %llu + %u [%d]", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, __get_str(cmd), + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->errors) +); + +DECLARE_EVENT_CLASS(block_rq, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( unsigned int, bytes ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) + ), + + TP_fast_assign( + __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_pos(rq); + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_sectors(rq); + __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_bytes(rq) : 0; + + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_dump_cmd(__get_str(cmd), rq); + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, __entry->bytes, __get_str(cmd), + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +); + +/** + * block_rq_insert - insert block operation request into queue + * @q: target queue + * @rq: block IO operation request + * + * Called immediately before block operation request @rq is inserted + * into queue @q. The fields in the operation request @rq struct can + * be examined to determine which device and sectors the pending + * operation would access. + */ +DEFINE_EVENT(block_rq, block_rq_insert, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +/** + * block_rq_issue - issue pending block IO request operation to device driver + * @q: queue holding operation + * @rq: block IO operation operation request + * + * Called when block operation request @rq from queue @q is sent to a + * device driver for processing. + */ +DEFINE_EVENT(block_rq, block_rq_issue, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +/** + * block_bio_bounce - used bounce buffer when processing block operation + * @q: queue holding the block operation + * @bio: block operation + * + * A bounce buffer was used to handle the block operation @bio in @q. + * This occurs when hardware limitations prevent a direct transfer of + * data between the @bio data memory area and the IO device. Use of a + * bounce buffer requires extra copying of data and decreases + * performance. + */ +TRACE_EVENT(block_bio_bounce, + + TP_PROTO(struct request_queue *q, struct bio *bio), + + TP_ARGS(q, bio), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev ? 
+ bio->bi_bdev->bd_dev : 0; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio_sectors(bio); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("%d,%d %s %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +); + +/** + * block_bio_complete - completed all work on the block operation + * @q: queue holding the block operation + * @bio: block operation completed + * @error: io error value + * + * This tracepoint indicates there is no further work to do on this + * block IO operation @bio. + */ +TRACE_EVENT(block_bio_complete, + + TP_PROTO(struct request_queue *q, struct bio *bio, int error), + + TP_ARGS(q, bio, error), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned, nr_sector ) + __field( int, error ) + __array( char, rwbs, RWBS_LEN) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio_sectors(bio); + __entry->error = error; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + ), + + TP_printk("%d,%d %s %llu + %u [%d]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->error) +); + +DECLARE_EVENT_CLASS(block_bio_merge, + + TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), + + TP_ARGS(q, rq, bio), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio_sectors(bio); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("%d,%d %s %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +); + +/** + * block_bio_backmerge - merging block operation to the end of an existing operation + * @q: queue holding operation + * @rq: request bio is being merged into + * @bio: new block operation to merge + * + * Merging block request @bio to the end of an existing block request + * in queue @q. + */ +DEFINE_EVENT(block_bio_merge, block_bio_backmerge, + + TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), + + TP_ARGS(q, rq, bio) +); + +/** + * block_bio_frontmerge - merging block operation to the beginning of an existing operation + * @q: queue holding operation + * @rq: request bio is being merged into + * @bio: new block operation to merge + * + * Merging block IO operation @bio to the beginning of an existing block + * operation in queue @q. + */ +DEFINE_EVENT(block_bio_merge, block_bio_frontmerge, + + TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), + + TP_ARGS(q, rq, bio) +); + +/** + * block_bio_queue - putting new block IO operation in queue + * @q: queue holding operation + * @bio: new block operation + * + * About to place the block IO operation @bio into queue @q. 
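+ * The request struct allocated to service it is traced by block_getrq
+ * and block_sleeprq below.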
+ */ +TRACE_EVENT(block_bio_queue, + + TP_PROTO(struct request_queue *q, struct bio *bio), + + TP_ARGS(q, bio), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio_sectors(bio); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("%d,%d %s %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +); + +DECLARE_EVENT_CLASS(block_get_rq, + + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), + + TP_ARGS(q, bio, rw), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; + __entry->sector = bio ? bio->bi_iter.bi_sector : 0; + __entry->nr_sector = bio ? bio_sectors(bio) : 0; + blk_fill_rwbs(__entry->rwbs, + bio ? bio->bi_rw : 0, __entry->nr_sector); + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("%d,%d %s %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +); + +/** + * block_getrq - get a free request entry in queue for block IO operations + * @q: queue for operations + * @bio: pending block IO operation + * @rw: low bit indicates a read (%0) or a write (%1) + * + * A request struct for queue @q has been allocated to handle the + * block IO operation @bio. + */ +DEFINE_EVENT(block_get_rq, block_getrq, + + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), + + TP_ARGS(q, bio, rw) +); + +/** + * block_sleeprq - waiting to get a free request entry in queue for block IO operation + * @q: queue for operation + * @bio: pending block IO operation + * @rw: low bit indicates a read (%0) or a write (%1) + * + * In the case where a request struct cannot be provided for queue @q + * the process needs to wait for an request struct to become + * available. This tracepoint event is generated each time the + * process goes to sleep waiting for request struct become available. + */ +DEFINE_EVENT(block_get_rq, block_sleeprq, + + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), + + TP_ARGS(q, bio, rw) +); + +/** + * block_plug - keep operations requests in request queue + * @q: request queue to plug + * + * Plug the request queue @q. Do not allow block operation requests + * to be sent to the device driver. Instead, accumulate requests in + * the queue to improve throughput performance of the block device. 
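+ * The corresponding release is traced by the block_unplug event below.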
+ */ +TRACE_EVENT(block_plug, + + TP_PROTO(struct request_queue *q), + + TP_ARGS(q), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("[%s]", __entry->comm) +); + +DECLARE_EVENT_CLASS(block_unplug, + + TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), + + TP_ARGS(q, depth, explicit), + + TP_STRUCT__entry( + __field( int, nr_rq ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->nr_rq = depth; + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) +); + +/** + * block_unplug - release of operations requests in request queue + * @q: request queue to unplug + * @depth: number of requests just added to the queue + * @explicit: whether this was an explicit unplug, or one from schedule() + * + * Unplug request queue @q because device driver is scheduled to work + * on elements in the request queue. + */ +DEFINE_EVENT(block_unplug, block_unplug, + + TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), + + TP_ARGS(q, depth, explicit) +); + +/** + * block_split - split a single bio struct into two bio structs + * @q: queue containing the bio + * @bio: block operation being split + * @new_sector: The starting sector for the new bio + * + * The bio request @bio in request queue @q needs to be split into two + * bio requests. The newly created @bio request starts at + * @new_sector. This split may be required due to hardware limitation + * such as operation crossing device boundaries in a RAID system. + */ +TRACE_EVENT(block_split, + + TP_PROTO(struct request_queue *q, struct bio *bio, + unsigned int new_sector), + + TP_ARGS(q, bio, new_sector), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( sector_t, new_sector ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->new_sector = new_sector; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + ), + + TP_printk("%d,%d %s %llu / %llu [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + (unsigned long long)__entry->new_sector, + __entry->comm) +); + +/** + * block_bio_remap - map request for a logical device to the raw device + * @q: queue holding the operation + * @bio: revised operation + * @dev: device for the operation + * @from: original sector for the operation + * + * An operation for a logical device has been mapped to the + * raw block device. 
+ */ +TRACE_EVENT(block_bio_remap, + + TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, + sector_t from), + + TP_ARGS(q, bio, dev, from), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( dev_t, old_dev ) + __field( sector_t, old_sector ) + __array( char, rwbs, RWBS_LEN) + ), + + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio_sectors(bio); + __entry->old_dev = dev; + __entry->old_sector = from; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + ), + + TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, + MAJOR(__entry->old_dev), MINOR(__entry->old_dev), + (unsigned long long)__entry->old_sector) +); + +/** + * block_rq_remap - map request for a block operation request + * @q: queue holding the operation + * @rq: block IO operation request + * @dev: device for the operation + * @from: original sector for the operation + * + * The block operation request @rq in @q has been remapped. The block + * operation request @rq holds the current information and @from hold + * the original sector. + */ +TRACE_EVENT(block_rq_remap, + + TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, + sector_t from), + + TP_ARGS(q, rq, dev, from), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( dev_t, old_dev ) + __field( sector_t, old_sector ) + __field( unsigned int, nr_bios ) + __array( char, rwbs, RWBS_LEN) + ), + + TP_fast_assign( + __entry->dev = disk_devt(rq->rq_disk); + __entry->sector = blk_rq_pos(rq); + __entry->nr_sector = blk_rq_sectors(rq); + __entry->old_dev = dev; + __entry->old_sector = from; + __entry->nr_bios = blk_rq_count_bios(rq); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + ), + + TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, + MAJOR(__entry->old_dev), MINOR(__entry->old_dev), + (unsigned long long)__entry->old_sector, __entry->nr_bios) +); + +#endif /* _TRACE_BLOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> + diff --git a/kernel/include/trace/events/btrfs.h b/kernel/include/trace/events/btrfs.h new file mode 100644 index 000000000..7f79cf459 --- /dev/null +++ b/kernel/include/trace/events/btrfs.h @@ -0,0 +1,1178 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM btrfs + +#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BTRFS_H + +#include <linux/writeback.h> +#include <linux/tracepoint.h> +#include <trace/events/gfpflags.h> + +struct btrfs_root; +struct btrfs_fs_info; +struct btrfs_inode; +struct extent_map; +struct btrfs_ordered_extent; +struct btrfs_delayed_ref_node; +struct btrfs_delayed_tree_ref; +struct btrfs_delayed_data_ref; +struct btrfs_delayed_ref_head; +struct btrfs_block_group_cache; +struct btrfs_free_cluster; +struct map_lookup; +struct extent_buffer; +struct btrfs_work; +struct __btrfs_workqueue; +struct btrfs_qgroup_operation; + +#define show_ref_type(type) \ + __print_symbolic(type, \ + { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \ + { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \ + { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \ + { BTRFS_SHARED_BLOCK_REF_KEY, 
"SHARED_BLOCK_REF" }, \ + { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" }) + +#define __show_root_type(obj) \ + __print_symbolic_u64(obj, \ + { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \ + { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \ + { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \ + { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \ + { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \ + { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \ + { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \ + { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \ + { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \ + { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \ + { BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \ + { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) + +#define show_root_type(obj) \ + obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \ + (obj >= BTRFS_ROOT_TREE_OBJECTID && \ + obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-" + +#define BTRFS_GROUP_FLAGS \ + { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \ + { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \ + { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \ + { BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \ + { BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \ + { BTRFS_BLOCK_GROUP_DUP, "DUP"}, \ + { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \ + { BTRFS_BLOCK_GROUP_RAID5, "RAID5"}, \ + { BTRFS_BLOCK_GROUP_RAID6, "RAID6"} + +#define BTRFS_UUID_SIZE 16 + +TRACE_EVENT(btrfs_transaction_commit, + + TP_PROTO(struct btrfs_root *root), + + TP_ARGS(root), + + TP_STRUCT__entry( + __field( u64, generation ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + __entry->generation = root->fs_info->generation; + __entry->root_objectid = root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), gen = %llu", + show_root_type(__entry->root_objectid), + (unsigned long long)__entry->generation) +); + +DECLARE_EVENT_CLASS(btrfs__inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( blkcnt_t, blocks ) + __field( u64, disk_i_size ) + __field( u64, generation ) + __field( u64, last_trans ) + __field( u64, logged_trans ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->blocks = inode->i_blocks; + __entry->disk_i_size = BTRFS_I(inode)->disk_i_size; + __entry->generation = BTRFS_I(inode)->generation; + __entry->last_trans = BTRFS_I(inode)->last_trans; + __entry->logged_trans = BTRFS_I(inode)->logged_trans; + __entry->root_objectid = + BTRFS_I(inode)->root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, " + "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu", + show_root_type(__entry->root_objectid), + (unsigned long long)__entry->generation, + (unsigned long)__entry->ino, + (unsigned long long)__entry->blocks, + (unsigned long long)__entry->disk_i_size, + (unsigned long long)__entry->last_trans, + (unsigned long long)__entry->logged_trans) +); + +DEFINE_EVENT(btrfs__inode, btrfs_inode_new, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(btrfs__inode, btrfs_inode_request, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +#define __show_map_type(type) \ + __print_symbolic_u64(type, \ + { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \ + { EXTENT_MAP_HOLE, "HOLE" }, \ + { EXTENT_MAP_INLINE, "INLINE" }, \ + { EXTENT_MAP_DELALLOC, "DELALLOC" }) + +#define show_map_type(type) \ + type, (type >= EXTENT_MAP_LAST_BYTE) ? 
"-" : __show_map_type(type) + +#define show_map_flags(flag) \ + __print_flags(flag, "|", \ + { (1 << EXTENT_FLAG_PINNED), "PINNED" },\ + { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\ + { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\ + { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\ + { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\ + { (1 << EXTENT_FLAG_FILLING), "FILLING" },\ + { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" }) + +TRACE_EVENT_CONDITION(btrfs_get_extent, + + TP_PROTO(struct btrfs_root *root, struct extent_map *map), + + TP_ARGS(root, map), + + TP_CONDITION(map), + + TP_STRUCT__entry( + __field( u64, root_objectid ) + __field( u64, start ) + __field( u64, len ) + __field( u64, orig_start ) + __field( u64, block_start ) + __field( u64, block_len ) + __field( unsigned long, flags ) + __field( int, refs ) + __field( unsigned int, compress_type ) + ), + + TP_fast_assign( + __entry->root_objectid = root->root_key.objectid; + __entry->start = map->start; + __entry->len = map->len; + __entry->orig_start = map->orig_start; + __entry->block_start = map->block_start; + __entry->block_len = map->block_len; + __entry->flags = map->flags; + __entry->refs = atomic_read(&map->refs); + __entry->compress_type = map->compress_type; + ), + + TP_printk("root = %llu(%s), start = %llu, len = %llu, " + "orig_start = %llu, block_start = %llu(%s), " + "block_len = %llu, flags = %s, refs = %u, " + "compress_type = %u", + show_root_type(__entry->root_objectid), + (unsigned long long)__entry->start, + (unsigned long long)__entry->len, + (unsigned long long)__entry->orig_start, + show_map_type(__entry->block_start), + (unsigned long long)__entry->block_len, + show_map_flags(__entry->flags), + __entry->refs, __entry->compress_type) +); + +#define show_ordered_flags(flags) \ + __print_flags(flags, "|", \ + { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \ + { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \ + { (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \ + { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \ + { (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \ + { (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \ + { (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \ + { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \ + { (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \ + { (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" }) + + +DECLARE_EVENT_CLASS(btrfs__ordered_extent, + + TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( u64, file_offset ) + __field( u64, start ) + __field( u64, len ) + __field( u64, disk_len ) + __field( u64, bytes_left ) + __field( unsigned long, flags ) + __field( int, compress_type ) + __field( int, refs ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->file_offset = ordered->file_offset; + __entry->start = ordered->start; + __entry->len = ordered->len; + __entry->disk_len = ordered->disk_len; + __entry->bytes_left = ordered->bytes_left; + __entry->flags = ordered->flags; + __entry->compress_type = ordered->compress_type; + __entry->refs = atomic_read(&ordered->refs); + __entry->root_objectid = + BTRFS_I(inode)->root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, " + "start = %llu, len = %llu, disk_len = %llu, " + "bytes_left = %llu, flags = %s, compress_type = %d, " + "refs = %d", + show_root_type(__entry->root_objectid), + (unsigned long long)__entry->ino, + (unsigned long long)__entry->file_offset, + (unsigned 
long long)__entry->start, + (unsigned long long)__entry->len, + (unsigned long long)__entry->disk_len, + (unsigned long long)__entry->bytes_left, + show_ordered_flags(__entry->flags), + __entry->compress_type, __entry->refs) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add, + + TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove, + + TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start, + + TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put, + + TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DECLARE_EVENT_CLASS(btrfs__writepage, + + TP_PROTO(struct page *page, struct inode *inode, + struct writeback_control *wbc), + + TP_ARGS(page, inode, wbc), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( pgoff_t, index ) + __field( long, nr_to_write ) + __field( long, pages_skipped ) + __field( loff_t, range_start ) + __field( loff_t, range_end ) + __field( char, for_kupdate ) + __field( char, for_reclaim ) + __field( char, range_cyclic ) + __field( pgoff_t, writeback_index ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->index = page->index; + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->range_start = wbc->range_start; + __entry->range_end = wbc->range_end; + __entry->for_kupdate = wbc->for_kupdate; + __entry->for_reclaim = wbc->for_reclaim; + __entry->range_cyclic = wbc->range_cyclic; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->root_objectid = + BTRFS_I(inode)->root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, " + "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, " + "range_end = %llu, for_kupdate = %d, " + "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu", + show_root_type(__entry->root_objectid), + (unsigned long)__entry->ino, __entry->index, + __entry->nr_to_write, __entry->pages_skipped, + __entry->range_start, __entry->range_end, + __entry->for_kupdate, + __entry->for_reclaim, __entry->range_cyclic, + (unsigned long)__entry->writeback_index) +); + +DEFINE_EVENT(btrfs__writepage, __extent_writepage, + + TP_PROTO(struct page *page, struct inode *inode, + struct writeback_control *wbc), + + TP_ARGS(page, inode, wbc) +); + +TRACE_EVENT(btrfs_writepage_end_io_hook, + + TP_PROTO(struct page *page, u64 start, u64 end, int uptodate), + + TP_ARGS(page, start, end, uptodate), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( pgoff_t, index ) + __field( u64, start ) + __field( u64, end ) + __field( int, uptodate ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + __entry->ino = page->mapping->host->i_ino; + __entry->index = page->index; + __entry->start = start; + __entry->end = end; + __entry->uptodate = uptodate; + __entry->root_objectid = + BTRFS_I(page->mapping->host)->root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, " + "end = %llu, uptodate = %d", + show_root_type(__entry->root_objectid), + (unsigned long)__entry->ino, (unsigned long)__entry->index, + (unsigned long long)__entry->start, + (unsigned long 
long)__entry->end, __entry->uptodate) +); + +TRACE_EVENT(btrfs_sync_file, + + TP_PROTO(struct file *file, int datasync), + + TP_ARGS(file, datasync), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( ino_t, parent ) + __field( int, datasync ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = d_inode(dentry); + + __entry->ino = inode->i_ino; + __entry->parent = d_inode(dentry->d_parent)->i_ino; + __entry->datasync = datasync; + __entry->root_objectid = + BTRFS_I(inode)->root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d", + show_root_type(__entry->root_objectid), + (unsigned long)__entry->ino, (unsigned long)__entry->parent, + __entry->datasync) +); + +TRACE_EVENT(btrfs_sync_fs, + + TP_PROTO(int wait), + + TP_ARGS(wait), + + TP_STRUCT__entry( + __field( int, wait ) + ), + + TP_fast_assign( + __entry->wait = wait; + ), + + TP_printk("wait = %d", __entry->wait) +); + +#define show_ref_action(action) \ + __print_symbolic(action, \ + { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \ + { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \ + { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \ + { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" }) + + +DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_tree_ref *full_ref, + int action), + + TP_ARGS(ref, full_ref, action), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( int, action ) + __field( u64, parent ) + __field( u64, ref_root ) + __field( int, level ) + __field( int, type ) + __field( u64, seq ) + ), + + TP_fast_assign( + __entry->bytenr = ref->bytenr; + __entry->num_bytes = ref->num_bytes; + __entry->action = action; + __entry->parent = full_ref->parent; + __entry->ref_root = full_ref->root; + __entry->level = full_ref->level; + __entry->type = ref->type; + __entry->seq = ref->seq; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " + "parent = %llu(%s), ref_root = %llu(%s), level = %d, " + "type = %s, seq = %llu", + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes, + show_ref_action(__entry->action), + show_root_type(__entry->parent), + show_root_type(__entry->ref_root), + __entry->level, show_ref_type(__entry->type), + (unsigned long long)__entry->seq) +); + +DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_tree_ref *full_ref, + int action), + + TP_ARGS(ref, full_ref, action) +); + +DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_tree_ref *full_ref, + int action), + + TP_ARGS(ref, full_ref, action) +); + +DECLARE_EVENT_CLASS(btrfs_delayed_data_ref, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_data_ref *full_ref, + int action), + + TP_ARGS(ref, full_ref, action), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( int, action ) + __field( u64, parent ) + __field( u64, ref_root ) + __field( u64, owner ) + __field( u64, offset ) + __field( int, type ) + __field( u64, seq ) + ), + + TP_fast_assign( + __entry->bytenr = ref->bytenr; + __entry->num_bytes = ref->num_bytes; + __entry->action = action; + __entry->parent = full_ref->parent; + __entry->ref_root = full_ref->root; + __entry->owner = full_ref->objectid; + __entry->offset = full_ref->offset; 
+ __entry->type = ref->type; + __entry->seq = ref->seq; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " + "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " + "offset = %llu, type = %s, seq = %llu", + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes, + show_ref_action(__entry->action), + show_root_type(__entry->parent), + show_root_type(__entry->ref_root), + (unsigned long long)__entry->owner, + (unsigned long long)__entry->offset, + show_ref_type(__entry->type), + (unsigned long long)__entry->seq) +); + +DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_data_ref *full_ref, + int action), + + TP_ARGS(ref, full_ref, action) +); + +DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_data_ref *full_ref, + int action), + + TP_ARGS(ref, full_ref, action) +); + +DECLARE_EVENT_CLASS(btrfs_delayed_ref_head, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_ref_head *head_ref, + int action), + + TP_ARGS(ref, head_ref, action), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( int, action ) + __field( int, is_data ) + ), + + TP_fast_assign( + __entry->bytenr = ref->bytenr; + __entry->num_bytes = ref->num_bytes; + __entry->action = action; + __entry->is_data = head_ref->is_data; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d", + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes, + show_ref_action(__entry->action), + __entry->is_data) +); + +DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_ref_head *head_ref, + int action), + + TP_ARGS(ref, head_ref, action) +); + +DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head, + + TP_PROTO(struct btrfs_delayed_ref_node *ref, + struct btrfs_delayed_ref_head *head_ref, + int action), + + TP_ARGS(ref, head_ref, action) +); + +#define show_chunk_type(type) \ + __print_flags(type, "|", \ + { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \ + { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \ + { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \ + { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \ + { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \ + { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \ + { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \ + { BTRFS_BLOCK_GROUP_RAID5, "RAID5" }, \ + { BTRFS_BLOCK_GROUP_RAID6, "RAID6" }) + +DECLARE_EVENT_CLASS(btrfs__chunk, + + TP_PROTO(struct btrfs_root *root, struct map_lookup *map, + u64 offset, u64 size), + + TP_ARGS(root, map, offset, size), + + TP_STRUCT__entry( + __field( int, num_stripes ) + __field( u64, type ) + __field( int, sub_stripes ) + __field( u64, offset ) + __field( u64, size ) + __field( u64, root_objectid ) + ), + + TP_fast_assign( + __entry->num_stripes = map->num_stripes; + __entry->type = map->type; + __entry->sub_stripes = map->sub_stripes; + __entry->offset = offset; + __entry->size = size; + __entry->root_objectid = root->root_key.objectid; + ), + + TP_printk("root = %llu(%s), offset = %llu, size = %llu, " + "num_stripes = %d, sub_stripes = %d, type = %s", + show_root_type(__entry->root_objectid), + (unsigned long long)__entry->offset, + (unsigned long long)__entry->size, + __entry->num_stripes, __entry->sub_stripes, + show_chunk_type(__entry->type)) +); + +DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc, + + TP_PROTO(struct btrfs_root *root, struct map_lookup 
*map, + u64 offset, u64 size), + + TP_ARGS(root, map, offset, size) +); + +DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free, + + TP_PROTO(struct btrfs_root *root, struct map_lookup *map, + u64 offset, u64 size), + + TP_ARGS(root, map, offset, size) +); + +TRACE_EVENT(btrfs_cow_block, + + TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf, + struct extent_buffer *cow), + + TP_ARGS(root, buf, cow), + + TP_STRUCT__entry( + __field( u64, root_objectid ) + __field( u64, buf_start ) + __field( int, refs ) + __field( u64, cow_start ) + __field( int, buf_level ) + __field( int, cow_level ) + ), + + TP_fast_assign( + __entry->root_objectid = root->root_key.objectid; + __entry->buf_start = buf->start; + __entry->refs = atomic_read(&buf->refs); + __entry->cow_start = cow->start; + __entry->buf_level = btrfs_header_level(buf); + __entry->cow_level = btrfs_header_level(cow); + ), + + TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu " + "(orig_level = %d), cow_buf = %llu (cow_level = %d)", + show_root_type(__entry->root_objectid), + __entry->refs, + (unsigned long long)__entry->buf_start, + __entry->buf_level, + (unsigned long long)__entry->cow_start, + __entry->cow_level) +); + +TRACE_EVENT(btrfs_space_reservation, + + TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val, + u64 bytes, int reserve), + + TP_ARGS(fs_info, type, val, bytes, reserve), + + TP_STRUCT__entry( + __array( u8, fsid, BTRFS_UUID_SIZE ) + __string( type, type ) + __field( u64, val ) + __field( u64, bytes ) + __field( int, reserve ) + ), + + TP_fast_assign( + memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE); + __assign_str(type, type); + __entry->val = val; + __entry->bytes = bytes; + __entry->reserve = reserve; + ), + + TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type), + __entry->val, __entry->reserve ? 
"reserve" : "release", + __entry->bytes) +); + +DECLARE_EVENT_CLASS(btrfs__reserved_extent, + + TP_PROTO(struct btrfs_root *root, u64 start, u64 len), + + TP_ARGS(root, start, len), + + TP_STRUCT__entry( + __field( u64, root_objectid ) + __field( u64, start ) + __field( u64, len ) + ), + + TP_fast_assign( + __entry->root_objectid = root->root_key.objectid; + __entry->start = start; + __entry->len = len; + ), + + TP_printk("root = %llu(%s), start = %llu, len = %llu", + show_root_type(__entry->root_objectid), + (unsigned long long)__entry->start, + (unsigned long long)__entry->len) +); + +DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc, + + TP_PROTO(struct btrfs_root *root, u64 start, u64 len), + + TP_ARGS(root, start, len) +); + +DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free, + + TP_PROTO(struct btrfs_root *root, u64 start, u64 len), + + TP_ARGS(root, start, len) +); + +TRACE_EVENT(find_free_extent, + + TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size, + u64 data), + + TP_ARGS(root, num_bytes, empty_size, data), + + TP_STRUCT__entry( + __field( u64, root_objectid ) + __field( u64, num_bytes ) + __field( u64, empty_size ) + __field( u64, data ) + ), + + TP_fast_assign( + __entry->root_objectid = root->root_key.objectid; + __entry->num_bytes = num_bytes; + __entry->empty_size = empty_size; + __entry->data = data; + ), + + TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, " + "flags = %Lu(%s)", show_root_type(__entry->root_objectid), + __entry->num_bytes, __entry->empty_size, __entry->data, + __print_flags((unsigned long)__entry->data, "|", + BTRFS_GROUP_FLAGS)) +); + +DECLARE_EVENT_CLASS(btrfs__reserve_extent, + + TP_PROTO(struct btrfs_root *root, + struct btrfs_block_group_cache *block_group, u64 start, + u64 len), + + TP_ARGS(root, block_group, start, len), + + TP_STRUCT__entry( + __field( u64, root_objectid ) + __field( u64, bg_objectid ) + __field( u64, flags ) + __field( u64, start ) + __field( u64, len ) + ), + + TP_fast_assign( + __entry->root_objectid = root->root_key.objectid; + __entry->bg_objectid = block_group->key.objectid; + __entry->flags = block_group->flags; + __entry->start = start; + __entry->len = len; + ), + + TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), " + "start = %Lu, len = %Lu", + show_root_type(__entry->root_objectid), __entry->bg_objectid, + __entry->flags, __print_flags((unsigned long)__entry->flags, + "|", BTRFS_GROUP_FLAGS), + __entry->start, __entry->len) +); + +DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent, + + TP_PROTO(struct btrfs_root *root, + struct btrfs_block_group_cache *block_group, u64 start, + u64 len), + + TP_ARGS(root, block_group, start, len) +); + +DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster, + + TP_PROTO(struct btrfs_root *root, + struct btrfs_block_group_cache *block_group, u64 start, + u64 len), + + TP_ARGS(root, block_group, start, len) +); + +TRACE_EVENT(btrfs_find_cluster, + + TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start, + u64 bytes, u64 empty_size, u64 min_bytes), + + TP_ARGS(block_group, start, bytes, empty_size, min_bytes), + + TP_STRUCT__entry( + __field( u64, bg_objectid ) + __field( u64, flags ) + __field( u64, start ) + __field( u64, bytes ) + __field( u64, empty_size ) + __field( u64, min_bytes ) + ), + + TP_fast_assign( + __entry->bg_objectid = block_group->key.objectid; + __entry->flags = block_group->flags; + __entry->start = start; + __entry->bytes = bytes; + __entry->empty_size = empty_size; + 
__entry->min_bytes = min_bytes; + ), + + TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu," + " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid, + __entry->flags, + __print_flags((unsigned long)__entry->flags, "|", + BTRFS_GROUP_FLAGS), __entry->start, + __entry->bytes, __entry->empty_size, __entry->min_bytes) +); + +TRACE_EVENT(btrfs_failed_cluster_setup, + + TP_PROTO(struct btrfs_block_group_cache *block_group), + + TP_ARGS(block_group), + + TP_STRUCT__entry( + __field( u64, bg_objectid ) + ), + + TP_fast_assign( + __entry->bg_objectid = block_group->key.objectid; + ), + + TP_printk("block_group = %Lu", __entry->bg_objectid) +); + +TRACE_EVENT(btrfs_setup_cluster, + + TP_PROTO(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, u64 size, int bitmap), + + TP_ARGS(block_group, cluster, size, bitmap), + + TP_STRUCT__entry( + __field( u64, bg_objectid ) + __field( u64, flags ) + __field( u64, start ) + __field( u64, max_size ) + __field( u64, size ) + __field( int, bitmap ) + ), + + TP_fast_assign( + __entry->bg_objectid = block_group->key.objectid; + __entry->flags = block_group->flags; + __entry->start = cluster->window_start; + __entry->max_size = cluster->max_size; + __entry->size = size; + __entry->bitmap = bitmap; + ), + + TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, " + "size = %Lu, max_size = %Lu, bitmap = %d", + __entry->bg_objectid, + __entry->flags, + __print_flags((unsigned long)__entry->flags, "|", + BTRFS_GROUP_FLAGS), __entry->start, + __entry->size, __entry->max_size, __entry->bitmap) +); + +struct extent_state; +TRACE_EVENT(alloc_extent_state, + + TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP), + + TP_ARGS(state, mask, IP), + + TP_STRUCT__entry( + __field(struct extent_state *, state) + __field(gfp_t, mask) + __field(unsigned long, ip) + ), + + TP_fast_assign( + __entry->state = state, + __entry->mask = mask, + __entry->ip = IP + ), + + TP_printk("state=%p; mask = %s; caller = %pS", __entry->state, + show_gfp_flags(__entry->mask), (void *)__entry->ip) +); + +TRACE_EVENT(free_extent_state, + + TP_PROTO(struct extent_state *state, unsigned long IP), + + TP_ARGS(state, IP), + + TP_STRUCT__entry( + __field(struct extent_state *, state) + __field(unsigned long, ip) + ), + + TP_fast_assign( + __entry->state = state, + __entry->ip = IP + ), + + TP_printk(" state=%p; caller = %pS", __entry->state, + (void *)__entry->ip) +); + +DECLARE_EVENT_CLASS(btrfs__work, + + TP_PROTO(struct btrfs_work *work), + + TP_ARGS(work), + + TP_STRUCT__entry( + __field( void *, work ) + __field( void *, wq ) + __field( void *, func ) + __field( void *, ordered_func ) + __field( void *, ordered_free ) + __field( void *, normal_work ) + ), + + TP_fast_assign( + __entry->work = work; + __entry->wq = work->wq; + __entry->func = work->func; + __entry->ordered_func = work->ordered_func; + __entry->ordered_free = work->ordered_free; + __entry->normal_work = &work->normal_work; + ), + + TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p," + " ordered_free=%p", + __entry->work, __entry->normal_work, __entry->wq, + __entry->func, __entry->ordered_func, __entry->ordered_free) +); + +/* For situations where the work is freed */ +DECLARE_EVENT_CLASS(btrfs__work__done, + + TP_PROTO(struct btrfs_work *work), + + TP_ARGS(work), + + TP_STRUCT__entry( + __field( void *, work ) + ), + + TP_fast_assign( + __entry->work = work; + ), + + TP_printk("work->%p", __entry->work) +); + +DEFINE_EVENT(btrfs__work,
btrfs_work_queued, + + TP_PROTO(struct btrfs_work *work), + + TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work, btrfs_work_sched, + + TP_PROTO(struct btrfs_work *work), + + TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done, + + TP_PROTO(struct btrfs_work *work), + + TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work, btrfs_ordered_sched, + + TP_PROTO(struct btrfs_work *work), + + TP_ARGS(work) +); + +DECLARE_EVENT_CLASS(btrfs__workqueue, + + TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), + + TP_ARGS(wq, name, high), + + TP_STRUCT__entry( + __field( void *, wq ) + __string( name, name ) + __field( int , high ) + ), + + TP_fast_assign( + __entry->wq = wq; + __assign_str(name, name); + __entry->high = high; + ), + + TP_printk("name=%s%s, wq=%p", __get_str(name), + __print_flags(__entry->high, "", + {(WQ_HIGHPRI), "-high"}), + __entry->wq) +); + +DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc, + + TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), + + TP_ARGS(wq, name, high) +); + +DECLARE_EVENT_CLASS(btrfs__workqueue_done, + + TP_PROTO(struct __btrfs_workqueue *wq), + + TP_ARGS(wq), + + TP_STRUCT__entry( + __field( void *, wq ) + ), + + TP_fast_assign( + __entry->wq = wq; + ), + + TP_printk("wq=%p", __entry->wq) +); + +DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy, + + TP_PROTO(struct __btrfs_workqueue *wq), + + TP_ARGS(wq) +); + +#define show_oper_type(type) \ + __print_symbolic(type, \ + { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \ + { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \ + { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \ + { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" }) + +DECLARE_EVENT_CLASS(btrfs_qgroup_oper, + + TP_PROTO(struct btrfs_qgroup_operation *oper), + + TP_ARGS(oper), + + TP_STRUCT__entry( + __field( u64, ref_root ) + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( u64, seq ) + __field( int, type ) + __field( u64, elem_seq ) + ), + + TP_fast_assign( + __entry->ref_root = oper->ref_root; + __entry->bytenr = oper->bytenr, + __entry->num_bytes = oper->num_bytes; + __entry->seq = oper->seq; + __entry->type = oper->type; + __entry->elem_seq = oper->elem.seq; + ), + + TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, " + "seq = %llu, elem.seq = %llu, type = %s", + (unsigned long long)__entry->ref_root, + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes, + (unsigned long long)__entry->seq, + (unsigned long long)__entry->elem_seq, + show_oper_type(__entry->type)) +); + +DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account, + + TP_PROTO(struct btrfs_qgroup_operation *oper), + + TP_ARGS(oper) +); + +DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref, + + TP_PROTO(struct btrfs_qgroup_operation *oper), + + TP_ARGS(oper) +); + +#endif /* _TRACE_BTRFS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/clk.h b/kernel/include/trace/events/clk.h new file mode 100644 index 000000000..758607226 --- /dev/null +++ b/kernel/include/trace/events/clk.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM clk + +#if !defined(_TRACE_CLK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CLK_H + +#include <linux/tracepoint.h> + +struct clk_core; + +DECLARE_EVENT_CLASS(clk, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core), + + TP_STRUCT__entry( + __string( name, core->name ) + ), + + TP_fast_assign( + __assign_str(name, core->name); + ), + + TP_printk("%s", __get_str(name)) +); + +DEFINE_EVENT(clk, clk_enable, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_enable_complete, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_disable, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_disable_complete, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_prepare, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_prepare_complete, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_unprepare, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DEFINE_EVENT(clk, clk_unprepare_complete, + + TP_PROTO(struct clk_core *core), + + TP_ARGS(core) +); + +DECLARE_EVENT_CLASS(clk_rate, + + TP_PROTO(struct clk_core *core, unsigned long rate), + + TP_ARGS(core, rate), + + TP_STRUCT__entry( + __string( name, core->name ) + __field(unsigned long, rate ) + ), + + TP_fast_assign( + __assign_str(name, core->name); + __entry->rate = rate; + ), + + TP_printk("%s %lu", __get_str(name), (unsigned long)__entry->rate) +); + +DEFINE_EVENT(clk_rate, clk_set_rate, + + TP_PROTO(struct clk_core *core, unsigned long rate), + + TP_ARGS(core, rate) +); + +DEFINE_EVENT(clk_rate, clk_set_rate_complete, + + TP_PROTO(struct clk_core *core, unsigned long rate), + + TP_ARGS(core, rate) +); + +DECLARE_EVENT_CLASS(clk_parent, + + TP_PROTO(struct clk_core *core, struct clk_core *parent), + + TP_ARGS(core, parent), + + TP_STRUCT__entry( + __string( name, core->name ) + __string( pname, parent->name ) + ), + + TP_fast_assign( + __assign_str(name, core->name); + __assign_str(pname, parent->name); + ), + + TP_printk("%s %s", __get_str(name), __get_str(pname)) +); + +DEFINE_EVENT(clk_parent, clk_set_parent, + + TP_PROTO(struct clk_core *core, struct clk_core *parent), + + TP_ARGS(core, parent) +); + +DEFINE_EVENT(clk_parent, clk_set_parent_complete, + + TP_PROTO(struct clk_core *core, struct clk_core *parent), + + TP_ARGS(core, parent) +); + +DECLARE_EVENT_CLASS(clk_phase, + + TP_PROTO(struct clk_core *core, int phase), + + TP_ARGS(core, phase), + + TP_STRUCT__entry( + __string( name, core->name ) + __field( int, phase ) + ), + + TP_fast_assign( + __assign_str(name, core->name); + __entry->phase = phase; + ), + + TP_printk("%s %d", __get_str(name), (int)__entry->phase) +); + +DEFINE_EVENT(clk_phase, clk_set_phase, + + TP_PROTO(struct clk_core *core, int phase), + + TP_ARGS(core, phase) +); + +DEFINE_EVENT(clk_phase, clk_set_phase_complete, + + TP_PROTO(struct clk_core *core, int phase), + + TP_ARGS(core, phase) +); + +#endif /* _TRACE_CLK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/cma.h b/kernel/include/trace/events/cma.h new file mode 100644 index 
000000000..d7cd96172 --- /dev/null +++ b/kernel/include/trace/events/cma.h @@ -0,0 +1,66 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cma + +#if !defined(_TRACE_CMA_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CMA_H + +#include <linux/types.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(cma_alloc, + + TP_PROTO(unsigned long pfn, const struct page *page, + unsigned int count, unsigned int align), + + TP_ARGS(pfn, page, count, align), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(const struct page *, page) + __field(unsigned int, count) + __field(unsigned int, align) + ), + + TP_fast_assign( + __entry->pfn = pfn; + __entry->page = page; + __entry->count = count; + __entry->align = align; + ), + + TP_printk("pfn=%lx page=%p count=%u align=%u", + __entry->pfn, + __entry->page, + __entry->count, + __entry->align) +); + +TRACE_EVENT(cma_release, + + TP_PROTO(unsigned long pfn, const struct page *page, + unsigned int count), + + TP_ARGS(pfn, page, count), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(const struct page *, page) + __field(unsigned int, count) + ), + + TP_fast_assign( + __entry->pfn = pfn; + __entry->page = page; + __entry->count = count; + ), + + TP_printk("pfn=%lx page=%p count=%u", + __entry->pfn, + __entry->page, + __entry->count) +); + +#endif /* _TRACE_CMA_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/compaction.h b/kernel/include/trace/events/compaction.h new file mode 100644 index 000000000..9a6a3fe0f --- /dev/null +++ b/kernel/include/trace/events/compaction.h @@ -0,0 +1,300 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM compaction + +#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COMPACTION_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/tracepoint.h> +#include <trace/events/gfpflags.h> + +DECLARE_EVENT_CLASS(mm_compaction_isolate_template, + + TP_PROTO( + unsigned long start_pfn, + unsigned long end_pfn, + unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken), + + TP_STRUCT__entry( + __field(unsigned long, start_pfn) + __field(unsigned long, end_pfn) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_taken) + ), + + TP_fast_assign( + __entry->start_pfn = start_pfn; + __entry->end_pfn = end_pfn; + __entry->nr_scanned = nr_scanned; + __entry->nr_taken = nr_taken; + ), + + TP_printk("range=(0x%lx ~ 0x%lx) nr_scanned=%lu nr_taken=%lu", + __entry->start_pfn, + __entry->end_pfn, + __entry->nr_scanned, + __entry->nr_taken) +); + +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages, + + TP_PROTO( + unsigned long start_pfn, + unsigned long end_pfn, + unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) +); + +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages, + + TP_PROTO( + unsigned long start_pfn, + unsigned long end_pfn, + unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) +); + +TRACE_EVENT(mm_compaction_migratepages, + + TP_PROTO(unsigned long nr_all, + int migrate_rc, + struct list_head *migratepages), + + TP_ARGS(nr_all, migrate_rc, migratepages), + + TP_STRUCT__entry( + __field(unsigned long, nr_migrated) + __field(unsigned long, nr_failed) + ), + + TP_fast_assign( + unsigned long nr_failed = 0; + struct list_head *page_lru; + + /* + * migrate_pages() returns 
either a non-negative number + * with the number of pages that failed migration, or an + * error code, in which case we need to count the remaining + * pages manually + */ + if (migrate_rc >= 0) + nr_failed = migrate_rc; + else + list_for_each(page_lru, migratepages) + nr_failed++; + + __entry->nr_migrated = nr_all - nr_failed; + __entry->nr_failed = nr_failed; + ), + + TP_printk("nr_migrated=%lu nr_failed=%lu", + __entry->nr_migrated, + __entry->nr_failed) +); + +TRACE_EVENT(mm_compaction_begin, + TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn, + unsigned long free_pfn, unsigned long zone_end, bool sync), + + TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync), + + TP_STRUCT__entry( + __field(unsigned long, zone_start) + __field(unsigned long, migrate_pfn) + __field(unsigned long, free_pfn) + __field(unsigned long, zone_end) + __field(bool, sync) + ), + + TP_fast_assign( + __entry->zone_start = zone_start; + __entry->migrate_pfn = migrate_pfn; + __entry->free_pfn = free_pfn; + __entry->zone_end = zone_end; + __entry->sync = sync; + ), + + TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s", + __entry->zone_start, + __entry->migrate_pfn, + __entry->free_pfn, + __entry->zone_end, + __entry->sync ? "sync" : "async") +); + +TRACE_EVENT(mm_compaction_end, + TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn, + unsigned long free_pfn, unsigned long zone_end, bool sync, + int status), + + TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status), + + TP_STRUCT__entry( + __field(unsigned long, zone_start) + __field(unsigned long, migrate_pfn) + __field(unsigned long, free_pfn) + __field(unsigned long, zone_end) + __field(bool, sync) + __field(int, status) + ), + + TP_fast_assign( + __entry->zone_start = zone_start; + __entry->migrate_pfn = migrate_pfn; + __entry->free_pfn = free_pfn; + __entry->zone_end = zone_end; + __entry->sync = sync; + __entry->status = status; + ), + + TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s status=%s", + __entry->zone_start, + __entry->migrate_pfn, + __entry->free_pfn, + __entry->zone_end, + __entry->sync ? 
"sync" : "async", + compaction_status_string[__entry->status]) +); + +TRACE_EVENT(mm_compaction_try_to_compact_pages, + + TP_PROTO( + int order, + gfp_t gfp_mask, + enum migrate_mode mode), + + TP_ARGS(order, gfp_mask, mode), + + TP_STRUCT__entry( + __field(int, order) + __field(gfp_t, gfp_mask) + __field(enum migrate_mode, mode) + ), + + TP_fast_assign( + __entry->order = order; + __entry->gfp_mask = gfp_mask; + __entry->mode = mode; + ), + + TP_printk("order=%d gfp_mask=0x%x mode=%d", + __entry->order, + __entry->gfp_mask, + (int)__entry->mode) +); + +DECLARE_EVENT_CLASS(mm_compaction_suitable_template, + + TP_PROTO(struct zone *zone, + int order, + int ret), + + TP_ARGS(zone, order, ret), + + TP_STRUCT__entry( + __field(int, nid) + __field(char *, name) + __field(int, order) + __field(int, ret) + ), + + TP_fast_assign( + __entry->nid = zone_to_nid(zone); + __entry->name = (char *)zone->name; + __entry->order = order; + __entry->ret = ret; + ), + + TP_printk("node=%d zone=%-8s order=%d ret=%s", + __entry->nid, + __entry->name, + __entry->order, + compaction_status_string[__entry->ret]) +); + +DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished, + + TP_PROTO(struct zone *zone, + int order, + int ret), + + TP_ARGS(zone, order, ret) +); + +DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable, + + TP_PROTO(struct zone *zone, + int order, + int ret), + + TP_ARGS(zone, order, ret) +); + +#ifdef CONFIG_COMPACTION +DECLARE_EVENT_CLASS(mm_compaction_defer_template, + + TP_PROTO(struct zone *zone, int order), + + TP_ARGS(zone, order), + + TP_STRUCT__entry( + __field(int, nid) + __field(char *, name) + __field(int, order) + __field(unsigned int, considered) + __field(unsigned int, defer_shift) + __field(int, order_failed) + ), + + TP_fast_assign( + __entry->nid = zone_to_nid(zone); + __entry->name = (char *)zone->name; + __entry->order = order; + __entry->considered = zone->compact_considered; + __entry->defer_shift = zone->compact_defer_shift; + __entry->order_failed = zone->compact_order_failed; + ), + + TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu", + __entry->nid, + __entry->name, + __entry->order, + __entry->order_failed, + __entry->considered, + 1UL << __entry->defer_shift) +); + +DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred, + + TP_PROTO(struct zone *zone, int order), + + TP_ARGS(zone, order) +); + +DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction, + + TP_PROTO(struct zone *zone, int order), + + TP_ARGS(zone, order) +); + +DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset, + + TP_PROTO(struct zone *zone, int order), + + TP_ARGS(zone, order) +); +#endif + +#endif /* _TRACE_COMPACTION_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/context_tracking.h b/kernel/include/trace/events/context_tracking.h new file mode 100644 index 000000000..ce8007cf2 --- /dev/null +++ b/kernel/include/trace/events/context_tracking.h @@ -0,0 +1,58 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM context_tracking + +#if !defined(_TRACE_CONTEXT_TRACKING_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CONTEXT_TRACKING_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(context_tracking_user, + + TP_PROTO(int dummy), + + TP_ARGS(dummy), + + TP_STRUCT__entry( + __field( int, dummy ) + ), + + TP_fast_assign( + __entry->dummy = dummy; + ), + + TP_printk("%s", "") +); + +/** + * user_enter - called when the 
kernel resumes to userspace + * @dummy: dummy arg to make trace event macro happy + * + * This event occurs when the kernel resumes to userspace after + * an exception or a syscall. + */ +DEFINE_EVENT(context_tracking_user, user_enter, + + TP_PROTO(int dummy), + + TP_ARGS(dummy) +); + +/** + * user_exit - called when userspace enters the kernel + * @dummy: dummy arg to make trace event macro happy + * + * This event occurs when userspace enters the kernel through + * an exception or a syscall. + */ +DEFINE_EVENT(context_tracking_user, user_exit, + + TP_PROTO(int dummy), + + TP_ARGS(dummy) +); + + +#endif /* _TRACE_CONTEXT_TRACKING_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/ext3.h b/kernel/include/trace/events/ext3.h new file mode 100644 index 000000000..fc733d281 --- /dev/null +++ b/kernel/include/trace/events/ext3.h @@ -0,0 +1,866 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ext3 + +#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EXT3_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(ext3_free_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( uid_t, uid ) + __field( gid_t, gid ) + __field( blkcnt_t, blocks ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->uid = i_uid_read(inode); + __entry->gid = i_gid_read(inode); + __entry->blocks = inode->i_blocks; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->uid, __entry->gid, + (unsigned long) __entry->blocks) +); + +TRACE_EVENT(ext3_request_inode, + TP_PROTO(struct inode *dir, int mode), + + TP_ARGS(dir, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, dir ) + __field( umode_t, mode ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext3_allocate_inode, + TP_PROTO(struct inode *inode, struct inode *dir, int mode), + + TP_ARGS(inode, dir, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, dir ) + __field( umode_t, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext3_evict_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, nlink ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nlink = inode->i_nlink; + ), + + TP_printk("dev %d,%d ino %lu nlink %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->nlink) +); + +TRACE_EVENT(ext3_drop_inode, + TP_PROTO(struct inode *inode, int drop), + + TP_ARGS(inode, drop), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, drop ) + ), + + TP_fast_assign( + __entry->dev = 
inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->drop = drop; + ), + + TP_printk("dev %d,%d ino %lu drop %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->drop) +); + +TRACE_EVENT(ext3_mark_inode_dirty, + TP_PROTO(struct inode *inode, unsigned long IP), + + TP_ARGS(inode, IP), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field(unsigned long, ip ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ip = IP; + ), + + TP_printk("dev %d,%d ino %lu caller %pS", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, (void *)__entry->ip) +); + +TRACE_EVENT(ext3_write_begin, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, len ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->flags = flags; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->flags) +); + +DECLARE_EVENT_CLASS(ext3__write_end, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, len ) + __field( unsigned int, copied ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->copied = copied; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DECLARE_EVENT_CLASS(ext3__page_op, + TP_PROTO(struct page *page), + + TP_ARGS(page), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( pgoff_t, index ) + + ), + + TP_fast_assign( + __entry->index = page->index; + __entry->ino = page->mapping->host->i_ino; + __entry->dev = page->mapping->host->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu page_index %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->index) +); + +DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_readpage, + + TP_PROTO(struct page 
*page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_releasepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +TRACE_EVENT(ext3_invalidatepage, + TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + + TP_ARGS(page, offset, length), + + TP_STRUCT__entry( + __field( pgoff_t, index ) + __field( unsigned int, offset ) + __field( unsigned int, length ) + __field( ino_t, ino ) + __field( dev_t, dev ) + + ), + + TP_fast_assign( + __entry->index = page->index; + __entry->offset = offset; + __entry->length = length; + __entry->ino = page->mapping->host->i_ino; + __entry->dev = page->mapping->host->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->index, __entry->offset, __entry->length) +); + +TRACE_EVENT(ext3_discard_blocks, + TP_PROTO(struct super_block *sb, unsigned long blk, + unsigned long count), + + TP_ARGS(sb, blk, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, blk ) + __field( unsigned long, count ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->blk = blk; + __entry->count = count; + ), + + TP_printk("dev %d,%d blk %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->blk, __entry->count) +); + +TRACE_EVENT(ext3_request_blocks, + TP_PROTO(struct inode *inode, unsigned long goal, + unsigned long count), + + TP_ARGS(inode, goal, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned long, count ) + __field( unsigned long, goal ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->count = count; + __entry->goal = goal; + ), + + TP_printk("dev %d,%d ino %lu count %lu goal %lu ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->count, __entry->goal) +); + +TRACE_EVENT(ext3_allocate_blocks, + TP_PROTO(struct inode *inode, unsigned long goal, + unsigned long count, unsigned long block), + + TP_ARGS(inode, goal, count, block), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned long, block ) + __field( unsigned long, count ) + __field( unsigned long, goal ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->block = block; + __entry->count = count; + __entry->goal = goal; + ), + + TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->count, __entry->block, + __entry->goal) +); + +TRACE_EVENT(ext3_free_blocks, + TP_PROTO(struct inode *inode, unsigned long block, + unsigned long count), + + TP_ARGS(inode, block, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( unsigned long, block ) + __field( unsigned long, count ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->block = block; + __entry->count = count; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->block, __entry->count) +); + +TRACE_EVENT(ext3_sync_file_enter, + TP_PROTO(struct file *file, int datasync), + + TP_ARGS(file, datasync), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, parent ) 
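+ /* parent: inode number of the dentry's parent directory, filled via d_inode(dentry->d_parent) in TP_fast_assign below */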
+ __field( int, datasync ) + ), + + TP_fast_assign( + struct dentry *dentry = file->f_path.dentry; + + __entry->dev = d_inode(dentry)->i_sb->s_dev; + __entry->ino = d_inode(dentry)->i_ino; + __entry->datasync = datasync; + __entry->parent = d_inode(dentry->d_parent)->i_ino; + ), + + TP_printk("dev %d,%d ino %lu parent %ld datasync %d ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->parent, __entry->datasync) +); + +TRACE_EVENT(ext3_sync_file_exit, + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + TP_STRUCT__entry( + __field( int, ret ) + __field( ino_t, ino ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->ret = ret; + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->ret) +); + +TRACE_EVENT(ext3_sync_fs, + TP_PROTO(struct super_block *sb, int wait), + + TP_ARGS(sb, wait), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, wait ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->wait = wait; + ), + + TP_printk("dev %d,%d wait %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->wait) +); + +TRACE_EVENT(ext3_rsv_window_add, + TP_PROTO(struct super_block *sb, + struct ext3_reserve_window_node *rsv_node), + + TP_ARGS(sb, rsv_node), + + TP_STRUCT__entry( + __field( unsigned long, start ) + __field( unsigned long, end ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->start = rsv_node->rsv_window._rsv_start; + __entry->end = rsv_node->rsv_window._rsv_end; + ), + + TP_printk("dev %d,%d start %lu end %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->start, __entry->end) +); + +TRACE_EVENT(ext3_discard_reservation, + TP_PROTO(struct inode *inode, + struct ext3_reserve_window_node *rsv_node), + + TP_ARGS(inode, rsv_node), + + TP_STRUCT__entry( + __field( unsigned long, start ) + __field( unsigned long, end ) + __field( ino_t, ino ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->start = rsv_node->rsv_window._rsv_start; + __entry->end = rsv_node->rsv_window._rsv_end; + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu start %lu end %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long)__entry->ino, __entry->start, + __entry->end) +); + +TRACE_EVENT(ext3_alloc_new_reservation, + TP_PROTO(struct super_block *sb, unsigned long goal), + + TP_ARGS(sb, goal), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, goal ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->goal = goal; + ), + + TP_printk("dev %d,%d goal %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->goal) +); + +TRACE_EVENT(ext3_reserved, + TP_PROTO(struct super_block *sb, unsigned long block, + struct ext3_reserve_window_node *rsv_node), + + TP_ARGS(sb, block, rsv_node), + + TP_STRUCT__entry( + __field( unsigned long, block ) + __field( unsigned long, start ) + __field( unsigned long, end ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->block = block; + __entry->start = rsv_node->rsv_window._rsv_start; + __entry->end = rsv_node->rsv_window._rsv_end; + __entry->dev = sb->s_dev; + ), + + TP_printk("dev %d,%d block %lu, start %lu end %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->block, __entry->start, __entry->end) +); + +TRACE_EVENT(ext3_forget, + TP_PROTO(struct inode *inode, 
int is_metadata, unsigned long block), + + TP_ARGS(inode, is_metadata, block), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( int, is_metadata ) + __field( unsigned long, block ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->is_metadata = is_metadata; + __entry->block = block; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->is_metadata, __entry->block) +); + +TRACE_EVENT(ext3_read_block_bitmap, + TP_PROTO(struct super_block *sb, unsigned int group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + ), + + TP_printk("dev %d,%d group %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group) +); + +TRACE_EVENT(ext3_direct_IO_enter, + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), + + TP_ARGS(inode, offset, len, rw), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( loff_t, pos ) + __field( unsigned long, len ) + __field( int, rw ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->rw) +); + +TRACE_EVENT(ext3_direct_IO_exit, + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, + int rw, int ret), + + TP_ARGS(inode, offset, len, rw, ret), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( loff_t, pos ) + __field( unsigned long, len ) + __field( int, rw ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pos, __entry->len, + __entry->rw, __entry->ret) +); + +TRACE_EVENT(ext3_unlink_enter, + TP_PROTO(struct inode *parent, struct dentry *dentry), + + TP_ARGS(parent, dentry), + + TP_STRUCT__entry( + __field( ino_t, parent ) + __field( ino_t, ino ) + __field( loff_t, size ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->parent = parent->i_ino; + __entry->ino = d_inode(dentry)->i_ino; + __entry->size = d_inode(dentry)->i_size; + __entry->dev = d_inode(dentry)->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu size %lld parent %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long)__entry->size, + (unsigned long) __entry->parent) +); + +TRACE_EVENT(ext3_unlink_exit, + TP_PROTO(struct dentry *dentry, int ret), + + TP_ARGS(dentry, ret), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ino = d_inode(dentry)->i_ino; + __entry->dev = d_inode(dentry)->i_sb->s_dev; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->ret) +); + 
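The ext3__truncate class that follows is the standard DECLARE_EVENT_CLASS/DEFINE_EVENT pattern used throughout this header: the class declares the record layout and format string once, and each DEFINE_EVENT merely stamps out a trace_<name>() wrapper sharing that layout. As a minimal sketch of how the resulting pair would be fired from a call site (assuming the usual CREATE_TRACE_POINTS convention; example_truncate() is a hypothetical stand-in, not the real fs/ext3 code):

	/* Sketch only: exactly one .c file defines CREATE_TRACE_POINTS
	 * before including the header so the tracepoints are instantiated. */
	#define CREATE_TRACE_POINTS
	#include <trace/events/ext3.h>

	static void example_truncate(struct inode *inode)
	{
		trace_ext3_truncate_enter(inode);	/* records dev, ino, i_blocks before */
		/* ... free the inode's data blocks here ... */
		trace_ext3_truncate_exit(inode);	/* records the same fields after */
	}

Either event can then be enabled at runtime from tracefs, e.g. echo 1 > /sys/kernel/debug/tracing/events/ext3/ext3_truncate_enter/enable.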
+DECLARE_EVENT_CLASS(ext3__truncate, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( blkcnt_t, blocks ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->blocks = inode->i_blocks; + ), + + TP_printk("dev %d,%d ino %lu blocks %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, (unsigned long) __entry->blocks) +); + +DEFINE_EVENT(ext3__truncate, ext3_truncate_enter, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(ext3__truncate, ext3_truncate_exit, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(ext3_get_blocks_enter, + TP_PROTO(struct inode *inode, unsigned long lblk, + unsigned long len, int create), + + TP_ARGS(inode, lblk, len, create), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( unsigned long, lblk ) + __field( unsigned long, len ) + __field( int, create ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->lblk = lblk; + __entry->len = len; + __entry->create = create; + ), + + TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len, __entry->create) +); + +TRACE_EVENT(ext3_get_blocks_exit, + TP_PROTO(struct inode *inode, unsigned long lblk, + unsigned long pblk, unsigned long len, int ret), + + TP_ARGS(inode, lblk, pblk, len, ret), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + __field( unsigned long, lblk ) + __field( unsigned long, pblk ) + __field( unsigned long, len ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->lblk = lblk; + __entry->pblk = pblk; + __entry->len = len; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->pblk, + __entry->len, __entry->ret) +); + +TRACE_EVENT(ext3_load_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( ino_t, ino ) + __field( dev_t, dev ) + ), + + TP_fast_assign( + __entry->ino = inode->i_ino; + __entry->dev = inode->i_sb->s_dev; + ), + + TP_printk("dev %d,%d ino %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino) +); + +#endif /* _TRACE_EXT3_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/ext4.h b/kernel/include/trace/events/ext4.h new file mode 100644 index 000000000..08ec3dd27 --- /dev/null +++ b/kernel/include/trace/events/ext4.h @@ -0,0 +1,2512 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ext4 + +#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EXT4_H + +#include <linux/writeback.h> +#include <linux/tracepoint.h> + +struct ext4_allocation_context; +struct ext4_allocation_request; +struct ext4_extent; +struct ext4_prealloc_space; +struct ext4_inode_info; +struct mpage_da_data; +struct ext4_map_blocks; +struct extent_status; + +#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) + +#define show_mballoc_flags(flags) __print_flags(flags, "|", \ + { EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \ + { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \ + { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \ + { 
EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \ + { EXT4_MB_HINT_BEST, "HINT_BEST" }, \ + { EXT4_MB_HINT_DATA, "HINT_DATA" }, \ + { EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \ + { EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \ + { EXT4_MB_HINT_GOAL_ONLY, "HINT_GOAL_ONLY" }, \ + { EXT4_MB_HINT_TRY_GOAL, "HINT_TRY_GOAL" }, \ + { EXT4_MB_DELALLOC_RESERVED, "DELALLOC_RESV" }, \ + { EXT4_MB_STREAM_ALLOC, "STREAM_ALLOC" }, \ + { EXT4_MB_USE_ROOT_BLOCKS, "USE_ROOT_BLKS" }, \ + { EXT4_MB_USE_RESERVED, "USE_RESV" }) + +#define show_map_flags(flags) __print_flags(flags, "|", \ + { EXT4_GET_BLOCKS_CREATE, "CREATE" }, \ + { EXT4_GET_BLOCKS_UNWRIT_EXT, "UNWRIT" }, \ + { EXT4_GET_BLOCKS_DELALLOC_RESERVE, "DELALLOC" }, \ + { EXT4_GET_BLOCKS_PRE_IO, "PRE_IO" }, \ + { EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \ + { EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \ + { EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \ + { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \ + { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }) + +#define show_mflags(flags) __print_flags(flags, "", \ + { EXT4_MAP_NEW, "N" }, \ + { EXT4_MAP_MAPPED, "M" }, \ + { EXT4_MAP_UNWRITTEN, "U" }, \ + { EXT4_MAP_BOUNDARY, "B" }) + +#define show_free_flags(flags) __print_flags(flags, "|", \ + { EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \ + { EXT4_FREE_BLOCKS_FORGET, "FORGET" }, \ + { EXT4_FREE_BLOCKS_VALIDATED, "VALIDATED" }, \ + { EXT4_FREE_BLOCKS_NO_QUOT_UPDATE, "NO_QUOTA" }, \ + { EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\ + { EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" }) + +#define show_extent_status(status) __print_flags(status, "", \ + { EXTENT_STATUS_WRITTEN, "W" }, \ + { EXTENT_STATUS_UNWRITTEN, "U" }, \ + { EXTENT_STATUS_DELAYED, "D" }, \ + { EXTENT_STATUS_HOLE, "H" }) + +#define show_falloc_mode(mode) __print_flags(mode, "|", \ + { FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \ + { FALLOC_FL_PUNCH_HOLE, "PUNCH_HOLE"}, \ + { FALLOC_FL_NO_HIDE_STALE, "NO_HIDE_STALE"}, \ + { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \ + { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) + + +TRACE_EVENT(ext4_other_inode_update_time, + TP_PROTO(struct inode *inode, ino_t orig_ino), + + TP_ARGS(inode, orig_ino), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, orig_ino ) + __field( uid_t, uid ) + __field( gid_t, gid ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->orig_ino = orig_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->uid = i_uid_read(inode); + __entry->gid = i_gid_read(inode); + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d orig_ino %lu ino %lu mode 0%o uid %u gid %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->orig_ino, + (unsigned long) __entry->ino, __entry->mode, + __entry->uid, __entry->gid) +); + +TRACE_EVENT(ext4_free_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( uid_t, uid ) + __field( gid_t, gid ) + __field( __u64, blocks ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->uid = i_uid_read(inode); + __entry->gid = i_gid_read(inode); + __entry->blocks = inode->i_blocks; + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->mode, + __entry->uid, __entry->gid, __entry->blocks) +); + +TRACE_EVENT(ext4_request_inode, + 
TP_PROTO(struct inode *dir, int mode), + + TP_ARGS(dir, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, dir ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext4_allocate_inode, + TP_PROTO(struct inode *inode, struct inode *dir, int mode), + + TP_ARGS(inode, dir, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, dir ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext4_evict_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, nlink ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nlink = inode->i_nlink; + ), + + TP_printk("dev %d,%d ino %lu nlink %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->nlink) +); + +TRACE_EVENT(ext4_drop_inode, + TP_PROTO(struct inode *inode, int drop), + + TP_ARGS(inode, drop), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, drop ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->drop = drop; + ), + + TP_printk("dev %d,%d ino %lu drop %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->drop) +); + +TRACE_EVENT(ext4_mark_inode_dirty, + TP_PROTO(struct inode *inode, unsigned long IP), + + TP_ARGS(inode, IP), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field(unsigned long, ip ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ip = IP; + ), + + TP_printk("dev %d,%d ino %lu caller %pS", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, (void *)__entry->ip) +); + +TRACE_EVENT(ext4_begin_ordered_truncate, + TP_PROTO(struct inode *inode, loff_t new_size), + + TP_ARGS(inode, new_size), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, new_size ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->new_size = new_size; + ), + + TP_printk("dev %d,%d ino %lu new_size %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->new_size) +); + +DECLARE_EVENT_CLASS(ext4__write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, len ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->flags = flags; + ), + + TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->pos, __entry->len, 
__entry->flags) +); + +DEFINE_EVENT(ext4__write_begin, ext4_write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags) +); + +DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags) +); + +DECLARE_EVENT_CLASS(ext4__write_end, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, len ) + __field( unsigned int, copied ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->copied = copied; + ), + + TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->copied) +); + +DEFINE_EVENT(ext4__write_end, ext4_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext4__write_end, ext4_da_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +TRACE_EVENT(ext4_writepages, + TP_PROTO(struct inode *inode, struct writeback_control *wbc), + + TP_ARGS(inode, wbc), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( long, nr_to_write ) + __field( long, pages_skipped ) + __field( loff_t, range_start ) + __field( loff_t, range_end ) + __field( pgoff_t, writeback_index ) + __field( int, sync_mode ) + __field( char, for_kupdate ) + __field( char, range_cyclic ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->range_start = wbc->range_start; + __entry->range_end = wbc->range_end; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->sync_mode = wbc->sync_mode; + __entry->for_kupdate = wbc->for_kupdate; + __entry->range_cyclic = wbc->range_cyclic; + ), + + TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " + "range_start %lld range_end %lld sync_mode %d " + "for_kupdate %d range_cyclic %d writeback_index %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->nr_to_write, + __entry->pages_skipped, __entry->range_start, + __entry->range_end, __entry->sync_mode, + __entry->for_kupdate, __entry->range_cyclic, + (unsigned long) __entry->writeback_index) +); + +TRACE_EVENT(ext4_da_write_pages, + TP_PROTO(struct inode *inode, pgoff_t first_page, + struct writeback_control *wbc), + + TP_ARGS(inode, first_page, wbc), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( pgoff_t, first_page ) + __field( long, nr_to_write ) + __field( int, sync_mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->first_page = first_page; + __entry->nr_to_write = wbc->nr_to_write; + __entry->sync_mode = wbc->sync_mode; + ), + + TP_printk("dev %d,%d ino %lu 
first_page %lu nr_to_write %ld " + "sync_mode %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->first_page, + __entry->nr_to_write, __entry->sync_mode) +); + +TRACE_EVENT(ext4_da_write_pages_extent, + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map), + + TP_ARGS(inode, map), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, lblk ) + __field( __u32, len ) + __field( __u32, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = map->m_lblk; + __entry->len = map->m_len; + __entry->flags = map->m_flags; + ), + + TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->lblk, __entry->len, + show_mflags(__entry->flags)) +); + +TRACE_EVENT(ext4_writepages_result, + TP_PROTO(struct inode *inode, struct writeback_control *wbc, + int ret, int pages_written), + + TP_ARGS(inode, wbc, ret, pages_written), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, ret ) + __field( int, pages_written ) + __field( long, pages_skipped ) + __field( pgoff_t, writeback_index ) + __field( int, sync_mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ret = ret; + __entry->pages_written = pages_written; + __entry->pages_skipped = wbc->pages_skipped; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->sync_mode = wbc->sync_mode; + ), + + TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld " + "sync_mode %d writeback_index %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->ret, + __entry->pages_written, __entry->pages_skipped, + __entry->sync_mode, + (unsigned long) __entry->writeback_index) +); + +DECLARE_EVENT_CLASS(ext4__page_op, + TP_PROTO(struct page *page), + + TP_ARGS(page), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( pgoff_t, index ) + + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->index = page->index; + ), + + TP_printk("dev %d,%d ino %lu page_index %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->index) +); + +DEFINE_EVENT(ext4__page_op, ext4_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext4__page_op, ext4_readpage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DEFINE_EVENT(ext4__page_op, ext4_releasepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) +); + +DECLARE_EVENT_CLASS(ext4_invalidatepage_op, + TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + + TP_ARGS(page, offset, length), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( pgoff_t, index ) + __field( unsigned int, offset ) + __field( unsigned int, length ) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->index = page->index; + __entry->offset = offset; + __entry->length = length; + ), + + TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->index, + __entry->offset, __entry->length) +); + +DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage, + TP_PROTO(struct 
page *page, unsigned int offset, unsigned int length), + + TP_ARGS(page, offset, length) +); + +DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage, + TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + + TP_ARGS(page, offset, length) +); + +TRACE_EVENT(ext4_discard_blocks, + TP_PROTO(struct super_block *sb, unsigned long long blk, + unsigned long long count), + + TP_ARGS(sb, blk, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u64, blk ) + __field( __u64, count ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->blk = blk; + __entry->count = count; + ), + + TP_printk("dev %d,%d blk %llu count %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->blk, __entry->count) +); + +DECLARE_EVENT_CLASS(ext4__mb_new_pa, + TP_PROTO(struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa), + + TP_ARGS(ac, pa), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, pa_pstart ) + __field( __u64, pa_lstart ) + __field( __u32, pa_len ) + + ), + + TP_fast_assign( + __entry->dev = ac->ac_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->pa_pstart = pa->pa_pstart; + __entry->pa_lstart = pa->pa_lstart; + __entry->pa_len = pa->pa_len; + ), + + TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) +); + +DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa, + + TP_PROTO(struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa), + + TP_ARGS(ac, pa) +); + +DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa, + + TP_PROTO(struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa), + + TP_ARGS(ac, pa) +); + +TRACE_EVENT(ext4_mb_release_inode_pa, + TP_PROTO(struct ext4_prealloc_space *pa, + unsigned long long block, unsigned int count), + + TP_ARGS(pa, block, count), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, block ) + __field( __u32, count ) + + ), + + TP_fast_assign( + __entry->dev = pa->pa_inode->i_sb->s_dev; + __entry->ino = pa->pa_inode->i_ino; + __entry->block = block; + __entry->count = count; + ), + + TP_printk("dev %d,%d ino %lu block %llu count %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->block, __entry->count) +); + +TRACE_EVENT(ext4_mb_release_group_pa, + TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa), + + TP_ARGS(sb, pa), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u64, pa_pstart ) + __field( __u32, pa_len ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->pa_pstart = pa->pa_pstart; + __entry->pa_len = pa->pa_len; + ), + + TP_printk("dev %d,%d pstart %llu len %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->pa_pstart, __entry->pa_len) +); + +TRACE_EVENT(ext4_discard_preallocations, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + ), + + TP_printk("dev %d,%d ino %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino) +); + +TRACE_EVENT(ext4_mb_discard_preallocations, + TP_PROTO(struct super_block *sb, int needed), + + TP_ARGS(sb, needed), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, needed ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + 
__entry->needed = needed; + ), + + TP_printk("dev %d,%d needed %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->needed) +); + +TRACE_EVENT(ext4_request_blocks, + TP_PROTO(struct ext4_allocation_request *ar), + + TP_ARGS(ar), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned int, len ) + __field( __u32, logical ) + __field( __u32, lleft ) + __field( __u32, lright ) + __field( __u64, goal ) + __field( __u64, pleft ) + __field( __u64, pright ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = ar->inode->i_sb->s_dev; + __entry->ino = ar->inode->i_ino; + __entry->len = ar->len; + __entry->logical = ar->logical; + __entry->goal = ar->goal; + __entry->lleft = ar->lleft; + __entry->lright = ar->lright; + __entry->pleft = ar->pleft; + __entry->pright = ar->pright; + __entry->flags = ar->flags; + ), + + TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu " + "lleft %u lright %u pleft %llu pright %llu ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags), + __entry->len, __entry->logical, __entry->goal, + __entry->lleft, __entry->lright, __entry->pleft, + __entry->pright) +); + +TRACE_EVENT(ext4_allocate_blocks, + TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block), + + TP_ARGS(ar, block), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, block ) + __field( unsigned int, len ) + __field( __u32, logical ) + __field( __u32, lleft ) + __field( __u32, lright ) + __field( __u64, goal ) + __field( __u64, pleft ) + __field( __u64, pright ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = ar->inode->i_sb->s_dev; + __entry->ino = ar->inode->i_ino; + __entry->block = block; + __entry->len = ar->len; + __entry->logical = ar->logical; + __entry->goal = ar->goal; + __entry->lleft = ar->lleft; + __entry->lright = ar->lright; + __entry->pleft = ar->pleft; + __entry->pright = ar->pright; + __entry->flags = ar->flags; + ), + + TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u " + "goal %llu lleft %u lright %u pleft %llu pright %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags), + __entry->len, __entry->block, __entry->logical, + __entry->goal, __entry->lleft, __entry->lright, + __entry->pleft, __entry->pright) +); + +TRACE_EVENT(ext4_free_blocks, + TP_PROTO(struct inode *inode, __u64 block, unsigned long count, + int flags), + + TP_ARGS(inode, block, count, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, block ) + __field( unsigned long, count ) + __field( int, flags ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->block = block; + __entry->count = count; + __entry->flags = flags; + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->block, __entry->count, + show_free_flags(__entry->flags)) +); + +TRACE_EVENT(ext4_sync_file_enter, + TP_PROTO(struct file *file, int datasync), + + TP_ARGS(file, datasync), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, parent ) + __field( int, datasync ) + ), + + TP_fast_assign( + struct dentry *dentry = file->f_path.dentry; + + __entry->dev = 
d_inode(dentry)->i_sb->s_dev; + __entry->ino = d_inode(dentry)->i_ino; + __entry->datasync = datasync; + __entry->parent = d_inode(dentry->d_parent)->i_ino; + ), + + TP_printk("dev %d,%d ino %lu parent %lu datasync %d ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->parent, __entry->datasync) +); + +TRACE_EVENT(ext4_sync_file_exit, + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->ret) +); + +TRACE_EVENT(ext4_sync_fs, + TP_PROTO(struct super_block *sb, int wait), + + TP_ARGS(sb, wait), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, wait ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->wait = wait; + ), + + TP_printk("dev %d,%d wait %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->wait) +); + +TRACE_EVENT(ext4_alloc_da_blocks, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned int, data_blocks ) + __field( unsigned int, meta_blocks ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + ), + + TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->data_blocks, __entry->meta_blocks) +); + +TRACE_EVENT(ext4_mballoc_alloc, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u32, orig_logical ) + __field( int, orig_start ) + __field( __u32, orig_group ) + __field( int, orig_len ) + __field( __u32, goal_logical ) + __field( int, goal_start ) + __field( __u32, goal_group ) + __field( int, goal_len ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + __field( __u16, found ) + __field( __u16, groups ) + __field( __u16, buddy ) + __field( __u16, flags ) + __field( __u16, tail ) + __field( __u8, cr ) + ), + + TP_fast_assign( + __entry->dev = ac->ac_inode->i_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->orig_logical = ac->ac_o_ex.fe_logical; + __entry->orig_start = ac->ac_o_ex.fe_start; + __entry->orig_group = ac->ac_o_ex.fe_group; + __entry->orig_len = ac->ac_o_ex.fe_len; + __entry->goal_logical = ac->ac_g_ex.fe_logical; + __entry->goal_start = ac->ac_g_ex.fe_start; + __entry->goal_group = ac->ac_g_ex.fe_group; + __entry->goal_len = ac->ac_g_ex.fe_len; + __entry->result_logical = ac->ac_f_ex.fe_logical; + __entry->result_start = ac->ac_f_ex.fe_start; + __entry->result_group = ac->ac_f_ex.fe_group; + __entry->result_len = ac->ac_f_ex.fe_len; + __entry->found = ac->ac_found; + __entry->flags = ac->ac_flags; + __entry->groups = ac->ac_groups_scanned; + __entry->buddy = ac->ac_buddy; + __entry->tail = ac->ac_tail; + __entry->cr = ac->ac_criteria; + ), + + TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " + "result %u/%d/%u@%u blks %u grps %u cr %u flags %s " + "tail %u broken %u", + MAJOR(__entry->dev), 
MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->orig_group, __entry->orig_start, + __entry->orig_len, __entry->orig_logical, + __entry->goal_group, __entry->goal_start, + __entry->goal_len, __entry->goal_logical, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical, + __entry->found, __entry->groups, __entry->cr, + show_mballoc_flags(__entry->flags), __entry->tail, + __entry->buddy ? 1 << __entry->buddy : 0) +); + +TRACE_EVENT(ext4_mballoc_prealloc, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u32, orig_logical ) + __field( int, orig_start ) + __field( __u32, orig_group ) + __field( int, orig_len ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev = ac->ac_inode->i_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->orig_logical = ac->ac_o_ex.fe_logical; + __entry->orig_start = ac->ac_o_ex.fe_start; + __entry->orig_group = ac->ac_o_ex.fe_group; + __entry->orig_len = ac->ac_o_ex.fe_len; + __entry->result_logical = ac->ac_b_ex.fe_logical; + __entry->result_start = ac->ac_b_ex.fe_start; + __entry->result_group = ac->ac_b_ex.fe_group; + __entry->result_len = ac->ac_b_ex.fe_len; + ), + + TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->orig_group, __entry->orig_start, + __entry->orig_len, __entry->orig_logical, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical) +); + +DECLARE_EVENT_CLASS(ext4__mballoc, + TP_PROTO(struct super_block *sb, + struct inode *inode, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, inode, group, start, len), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->ino = inode ? 
inode->i_ino : 0; + __entry->result_start = start; + __entry->result_group = group; + __entry->result_len = len; + ), + + TP_printk("dev %d,%d inode %lu extent %u/%d/%d ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->result_group, __entry->result_start, + __entry->result_len) +); + +DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard, + + TP_PROTO(struct super_block *sb, + struct inode *inode, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, inode, group, start, len) +); + +DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free, + + TP_PROTO(struct super_block *sb, + struct inode *inode, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, inode, group, start, len) +); + +TRACE_EVENT(ext4_forget, + TP_PROTO(struct inode *inode, int is_metadata, __u64 block), + + TP_ARGS(inode, is_metadata, block), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, block ) + __field( int, is_metadata ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->block = block; + __entry->is_metadata = is_metadata; + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->is_metadata, __entry->block) +); + +TRACE_EVENT(ext4_da_update_reserve_space, + TP_PROTO(struct inode *inode, int used_blocks, int quota_claim), + + TP_ARGS(inode, used_blocks, quota_claim), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, i_blocks ) + __field( int, used_blocks ) + __field( int, reserved_data_blocks ) + __field( int, reserved_meta_blocks ) + __field( int, allocated_meta_blocks ) + __field( int, quota_claim ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->i_blocks = inode->i_blocks; + __entry->used_blocks = used_blocks; + __entry->reserved_data_blocks = + EXT4_I(inode)->i_reserved_data_blocks; + __entry->reserved_meta_blocks = + EXT4_I(inode)->i_reserved_meta_blocks; + __entry->allocated_meta_blocks = + EXT4_I(inode)->i_allocated_meta_blocks; + __entry->quota_claim = quota_claim; + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d " + "reserved_data_blocks %d reserved_meta_blocks %d " + "allocated_meta_blocks %d quota_claim %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->i_blocks, + __entry->used_blocks, __entry->reserved_data_blocks, + __entry->reserved_meta_blocks, __entry->allocated_meta_blocks, + __entry->quota_claim) +); + +TRACE_EVENT(ext4_da_reserve_space, + TP_PROTO(struct inode *inode, int md_needed), + + TP_ARGS(inode, md_needed), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, i_blocks ) + __field( int, md_needed ) + __field( int, reserved_data_blocks ) + __field( int, reserved_meta_blocks ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->i_blocks = inode->i_blocks; + __entry->md_needed = md_needed; + __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d ino %lu mode 
0%o i_blocks %llu md_needed %d " + "reserved_data_blocks %d reserved_meta_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->i_blocks, + __entry->md_needed, __entry->reserved_data_blocks, + __entry->reserved_meta_blocks) +); + +TRACE_EVENT(ext4_da_release_space, + TP_PROTO(struct inode *inode, int freed_blocks), + + TP_ARGS(inode, freed_blocks), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, i_blocks ) + __field( int, freed_blocks ) + __field( int, reserved_data_blocks ) + __field( int, reserved_meta_blocks ) + __field( int, allocated_meta_blocks ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->i_blocks = inode->i_blocks; + __entry->freed_blocks = freed_blocks; + __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d " + "reserved_data_blocks %d reserved_meta_blocks %d " + "allocated_meta_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->mode, __entry->i_blocks, + __entry->freed_blocks, __entry->reserved_data_blocks, + __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) +); + +DECLARE_EVENT_CLASS(ext4__bitmap_load, + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + ), + + TP_printk("dev %d,%d group %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); + +TRACE_EVENT(ext4_direct_IO_enter, + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), + + TP_ARGS(inode, offset, len, rw), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned long, len ) + __field( int, rw ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + ), + + TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->rw) +); + +TRACE_EVENT(ext4_direct_IO_exit, + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, + int rw, int ret), + + TP_ARGS(inode, offset, len, rw, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned long, len ) + __field( int, rw ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + 
__entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->pos, __entry->len, + __entry->rw, __entry->ret) +); + +DECLARE_EVENT_CLASS(ext4__fallocate_mode, + TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + + TP_ARGS(inode, offset, len, mode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, offset ) + __field( loff_t, len ) + __field( int, mode ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->offset = offset; + __entry->len = len; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d ino %lu offset %lld len %lld mode %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->offset, __entry->len, + show_falloc_mode(__entry->mode)) +); + +DEFINE_EVENT(ext4__fallocate_mode, ext4_fallocate_enter, + + TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + + TP_ARGS(inode, offset, len, mode) +); + +DEFINE_EVENT(ext4__fallocate_mode, ext4_punch_hole, + + TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + + TP_ARGS(inode, offset, len, mode) +); + +DEFINE_EVENT(ext4__fallocate_mode, ext4_zero_range, + + TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + + TP_ARGS(inode, offset, len, mode) +); + +TRACE_EVENT(ext4_fallocate_exit, + TP_PROTO(struct inode *inode, loff_t offset, + unsigned int max_blocks, int ret), + + TP_ARGS(inode, offset, max_blocks, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, pos ) + __field( unsigned int, blocks ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = offset; + __entry->blocks = max_blocks; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->pos, __entry->blocks, + __entry->ret) +); + +TRACE_EVENT(ext4_unlink_enter, + TP_PROTO(struct inode *parent, struct dentry *dentry), + + TP_ARGS(parent, dentry), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, parent ) + __field( loff_t, size ) + ), + + TP_fast_assign( + __entry->dev = d_inode(dentry)->i_sb->s_dev; + __entry->ino = d_inode(dentry)->i_ino; + __entry->parent = parent->i_ino; + __entry->size = d_inode(dentry)->i_size; + ), + + TP_printk("dev %d,%d ino %lu size %lld parent %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->size, + (unsigned long) __entry->parent) +); + +TRACE_EVENT(ext4_unlink_exit, + TP_PROTO(struct dentry *dentry, int ret), + + TP_ARGS(dentry, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = d_inode(dentry)->i_sb->s_dev; + __entry->ino = d_inode(dentry)->i_ino; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->ret) +); + +DECLARE_EVENT_CLASS(ext4__truncate, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, blocks ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; 
+ __entry->blocks = inode->i_blocks; + ), + + TP_printk("dev %d,%d ino %lu blocks %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->blocks) +); + +DEFINE_EVENT(ext4__truncate, ext4_truncate_enter, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(ext4__truncate, ext4_truncate_exit, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +/* 'ux' is the unwritten extent. */ +TRACE_EVENT(ext4_ext_convert_to_initialized_enter, + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, + struct ext4_extent *ux), + + TP_ARGS(inode, map, ux), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, m_lblk ) + __field( unsigned, m_len ) + __field( ext4_lblk_t, u_lblk ) + __field( unsigned, u_len ) + __field( ext4_fsblk_t, u_pblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->m_lblk = map->m_lblk; + __entry->m_len = map->m_len; + __entry->u_lblk = le32_to_cpu(ux->ee_block); + __entry->u_len = ext4_ext_get_actual_len(ux); + __entry->u_pblk = ext4_ext_pblock(ux); + ), + + TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u " + "u_pblk %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->m_lblk, __entry->m_len, + __entry->u_lblk, __entry->u_len, __entry->u_pblk) +); + +/* + * 'ux' is the unwritten extent. + * 'ix' is the initialized extent to which blocks are transferred. + */ +TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath, + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, + struct ext4_extent *ux, struct ext4_extent *ix), + + TP_ARGS(inode, map, ux, ix), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, m_lblk ) + __field( unsigned, m_len ) + __field( ext4_lblk_t, u_lblk ) + __field( unsigned, u_len ) + __field( ext4_fsblk_t, u_pblk ) + __field( ext4_lblk_t, i_lblk ) + __field( unsigned, i_len ) + __field( ext4_fsblk_t, i_pblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->m_lblk = map->m_lblk; + __entry->m_len = map->m_len; + __entry->u_lblk = le32_to_cpu(ux->ee_block); + __entry->u_len = ext4_ext_get_actual_len(ux); + __entry->u_pblk = ext4_ext_pblock(ux); + __entry->i_lblk = le32_to_cpu(ix->ee_block); + __entry->i_len = ext4_ext_get_actual_len(ix); + __entry->i_pblk = ext4_ext_pblock(ix); + ), + + TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u " + "u_lblk %u u_len %u u_pblk %llu " + "i_lblk %u i_len %u i_pblk %llu ", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->m_lblk, __entry->m_len, + __entry->u_lblk, __entry->u_len, __entry->u_pblk, + __entry->i_lblk, __entry->i_len, __entry->i_pblk) +); + +DECLARE_EVENT_CLASS(ext4__map_blocks_enter, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, + unsigned int len, unsigned int flags), + + TP_ARGS(inode, lblk, len, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( unsigned int, len ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + __entry->len = len; + __entry->flags = flags; + ), + + TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len, show_map_flags(__entry->flags)) +); + 
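The ext4__map_blocks_enter class above is reused by the two DEFINE_EVENT() instances that follow, one for the extent-mapped path and one for the indirect-mapped path. As a rough caller-side sketch (the function shape below is assumed for illustration only; the real call sites live in fs/ext4 and are not part of this header), each DEFINE_EVENT() yields a trace_<name>() stub taking the class's TP_PROTO arguments:

	/*
	 * Hypothetical sketch: a block-mapping path fires the enter event
	 * with the same arguments the event class declares. The stub
	 * compiles down to a no-op branch unless the tracepoint has been
	 * enabled at runtime.
	 */
	static int example_map_blocks(struct inode *inode,
				      struct ext4_map_blocks *map, int flags)
	{
		trace_ext4_ext_map_blocks_enter(inode, map->m_lblk,
						map->m_len, flags);
		/* ... perform the actual extent lookup or allocation ... */
		return 0;
	}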
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, + unsigned len, unsigned flags), + + TP_ARGS(inode, lblk, len, flags) +); + +DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, + unsigned len, unsigned flags), + + TP_ARGS(inode, lblk, len, flags) +); + +DECLARE_EVENT_CLASS(ext4__map_blocks_exit, + TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map, + int ret), + + TP_ARGS(inode, flags, map, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned int, flags ) + __field( ext4_fsblk_t, pblk ) + __field( ext4_lblk_t, lblk ) + __field( unsigned int, len ) + __field( unsigned int, mflags ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->flags = flags; + __entry->pblk = map->m_pblk; + __entry->lblk = map->m_lblk; + __entry->len = map->m_len; + __entry->mflags = map->m_flags; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u " + "mflags %s ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + show_map_flags(__entry->flags), __entry->lblk, __entry->pblk, + __entry->len, show_mflags(__entry->mflags), __entry->ret) +); + +DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit, + TP_PROTO(struct inode *inode, unsigned flags, + struct ext4_map_blocks *map, int ret), + + TP_ARGS(inode, flags, map, ret) +); + +DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit, + TP_PROTO(struct inode *inode, unsigned flags, + struct ext4_map_blocks *map, int ret), + + TP_ARGS(inode, flags, map, ret) +); + +TRACE_EVENT(ext4_ext_load_extent, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk), + + TP_ARGS(inode, lblk, pblk), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_fsblk_t, pblk ) + __field( ext4_lblk_t, lblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pblk = pblk; + __entry->lblk = lblk; + ), + + TP_printk("dev %d,%d ino %lu lblk %u pblk %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->pblk) +); + +TRACE_EVENT(ext4_load_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + ), + + TP_printk("dev %d,%d ino %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino) +); + +TRACE_EVENT(ext4_journal_start, + TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks, + unsigned long IP), + + TP_ARGS(sb, blocks, rsv_blocks, IP), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field(unsigned long, ip ) + __field( int, blocks ) + __field( int, rsv_blocks ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->ip = IP; + __entry->blocks = blocks; + __entry->rsv_blocks = rsv_blocks; + ), + + TP_printk("dev %d,%d blocks %d rsv_blocks %d caller %pS", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip) +); + +TRACE_EVENT(ext4_journal_start_reserved, + TP_PROTO(struct super_block *sb, int blocks, unsigned long IP), + + TP_ARGS(sb, blocks, IP), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field(unsigned long, ip ) + __field( int, blocks )
+ ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->ip = IP; + __entry->blocks = blocks; + ), + + TP_printk("dev %d,%d blocks %d caller %pS", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->blocks, (void *)__entry->ip) +); + +DECLARE_EVENT_CLASS(ext4__trim, + TP_PROTO(struct super_block *sb, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, group, start, len), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( __u32, group ) + __field( int, start ) + __field( int, len ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); + __entry->group = group; + __entry->start = start; + __entry->len = len; + ), + + TP_printk("dev %d,%d group %u, start %d, len %d", + __entry->dev_major, __entry->dev_minor, + __entry->group, __entry->start, __entry->len) +); + +DEFINE_EVENT(ext4__trim, ext4_trim_extent, + + TP_PROTO(struct super_block *sb, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, group, start, len) +); + +DEFINE_EVENT(ext4__trim, ext4_trim_all_free, + + TP_PROTO(struct super_block *sb, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, group, start, len) +); + +TRACE_EVENT(ext4_ext_handle_unwritten_extents, + TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags, + unsigned int allocated, ext4_fsblk_t newblock), + + TP_ARGS(inode, map, flags, allocated, newblock), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( int, flags ) + __field( ext4_lblk_t, lblk ) + __field( ext4_fsblk_t, pblk ) + __field( unsigned int, len ) + __field( unsigned int, allocated ) + __field( ext4_fsblk_t, newblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->flags = flags; + __entry->lblk = map->m_lblk; + __entry->pblk = map->m_pblk; + __entry->len = map->m_len; + __entry->allocated = allocated; + __entry->newblk = newblock; + ), + + TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %s " + "allocated %d newblock %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, + __entry->len, show_map_flags(__entry->flags), + (unsigned int) __entry->allocated, + (unsigned long long) __entry->newblk) +); + +TRACE_EVENT(ext4_get_implied_cluster_alloc_exit, + TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret), + + TP_ARGS(sb, map, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned int, flags ) + __field( ext4_lblk_t, lblk ) + __field( ext4_fsblk_t, pblk ) + __field( unsigned int, len ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->flags = map->m_flags; + __entry->lblk = map->m_lblk; + __entry->pblk = map->m_pblk; + __entry->len = map->m_len; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %s ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->lblk, (unsigned long long) __entry->pblk, + __entry->len, show_mflags(__entry->flags), __entry->ret) +); + +TRACE_EVENT(ext4_ext_put_in_cache, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len, + ext4_fsblk_t start), + + TP_ARGS(inode, lblk, len, start), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( unsigned int, len ) + __field( ext4_fsblk_t, start ) + ), + + 
TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + __entry->len = len; + __entry->start = start; + ), + + TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->lblk, + __entry->len, + (unsigned long long) __entry->start) +); + +TRACE_EVENT(ext4_ext_in_cache, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret), + + TP_ARGS(inode, lblk, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu lblk %u ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->lblk, + __entry->ret) + +); + +TRACE_EVENT(ext4_find_delalloc_range, + TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to, + int reverse, int found, ext4_lblk_t found_blk), + + TP_ARGS(inode, from, to, reverse, found, found_blk), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, from ) + __field( ext4_lblk_t, to ) + __field( int, reverse ) + __field( int, found ) + __field( ext4_lblk_t, found_blk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->from = from; + __entry->to = to; + __entry->reverse = reverse; + __entry->found = found; + __entry->found_blk = found_blk; + ), + + TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d " + "(blk = %u)", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->from, (unsigned) __entry->to, + __entry->reverse, __entry->found, + (unsigned) __entry->found_blk) +); + +TRACE_EVENT(ext4_get_reserved_cluster_alloc, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len), + + TP_ARGS(inode, lblk, len), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( unsigned int, len ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + __entry->len = len; + ), + + TP_printk("dev %d,%d ino %lu lblk %u len %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->lblk, + __entry->len) +); + +TRACE_EVENT(ext4_ext_show_extent, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, + unsigned short len), + + TP_ARGS(inode, lblk, pblk, len), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_fsblk_t, pblk ) + __field( ext4_lblk_t, lblk ) + __field( unsigned short, len ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pblk = pblk; + __entry->lblk = lblk; + __entry->len = len; + ), + + TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->lblk, + (unsigned long long) __entry->pblk, + (unsigned short) __entry->len) +); + +TRACE_EVENT(ext4_remove_blocks, + TP_PROTO(struct inode *inode, struct ext4_extent *ex, + ext4_lblk_t from, ext4_fsblk_t to, + long long partial_cluster), + + TP_ARGS(inode, ex, from, to, partial_cluster), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, from ) + 
__field( ext4_lblk_t, to ) + __field( long long, partial ) + __field( ext4_fsblk_t, ee_pblk ) + __field( ext4_lblk_t, ee_lblk ) + __field( unsigned short, ee_len ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->from = from; + __entry->to = to; + __entry->partial = partial_cluster; + __entry->ee_pblk = ext4_ext_pblock(ex); + __entry->ee_lblk = le32_to_cpu(ex->ee_block); + __entry->ee_len = ext4_ext_get_actual_len(ex); + ), + + TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u] " + "from %u to %u partial_cluster %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->ee_lblk, + (unsigned long long) __entry->ee_pblk, + (unsigned short) __entry->ee_len, + (unsigned) __entry->from, + (unsigned) __entry->to, + (long long) __entry->partial) +); + +TRACE_EVENT(ext4_ext_rm_leaf, + TP_PROTO(struct inode *inode, ext4_lblk_t start, + struct ext4_extent *ex, + long long partial_cluster), + + TP_ARGS(inode, start, ex, partial_cluster), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( long long, partial ) + __field( ext4_lblk_t, start ) + __field( ext4_lblk_t, ee_lblk ) + __field( ext4_fsblk_t, ee_pblk ) + __field( short, ee_len ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->partial = partial_cluster; + __entry->start = start; + __entry->ee_lblk = le32_to_cpu(ex->ee_block); + __entry->ee_pblk = ext4_ext_pblock(ex); + __entry->ee_len = ext4_ext_get_actual_len(ex); + ), + + TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u] " + "partial_cluster %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->start, + (unsigned) __entry->ee_lblk, + (unsigned long long) __entry->ee_pblk, + (unsigned short) __entry->ee_len, + (long long) __entry->partial) +); + +TRACE_EVENT(ext4_ext_rm_idx, + TP_PROTO(struct inode *inode, ext4_fsblk_t pblk), + + TP_ARGS(inode, pblk), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_fsblk_t, pblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pblk = pblk; + ), + + TP_printk("dev %d,%d ino %lu index_pblk %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long long) __entry->pblk) +); + +TRACE_EVENT(ext4_ext_remove_space, + TP_PROTO(struct inode *inode, ext4_lblk_t start, + ext4_lblk_t end, int depth), + + TP_ARGS(inode, start, end, depth), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, start ) + __field( ext4_lblk_t, end ) + __field( int, depth ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->start = start; + __entry->end = end; + __entry->depth = depth; + ), + + TP_printk("dev %d,%d ino %lu since %u end %u depth %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->start, + (unsigned) __entry->end, + __entry->depth) +); + +TRACE_EVENT(ext4_ext_remove_space_done, + TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end, + int depth, long long partial, __le16 eh_entries), + + TP_ARGS(inode, start, end, depth, partial, eh_entries), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, start ) + __field( ext4_lblk_t, end ) + __field( int, depth ) + __field( long long, partial ) + __field( 
unsigned short, eh_entries ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->start = start; + __entry->end = end; + __entry->depth = depth; + __entry->partial = partial; + __entry->eh_entries = le16_to_cpu(eh_entries); + ), + + TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld " + "remaining_entries %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned) __entry->start, + (unsigned) __entry->end, + __entry->depth, + (long long) __entry->partial, + (unsigned short) __entry->eh_entries) +); + +DECLARE_EVENT_CLASS(ext4__es_extent, + TP_PROTO(struct inode *inode, struct extent_status *es), + + TP_ARGS(inode, es), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( ext4_lblk_t, len ) + __field( ext4_fsblk_t, pblk ) + __field( char, status ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = es->es_lblk; + __entry->len = es->es_len; + __entry->pblk = ext4_es_pblock(es); + __entry->status = ext4_es_status(es); + ), + + TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len, + __entry->pblk, show_extent_status(__entry->status)) +); + +DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent, + TP_PROTO(struct inode *inode, struct extent_status *es), + + TP_ARGS(inode, es) +); + +DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent, + TP_PROTO(struct inode *inode, struct extent_status *es), + + TP_ARGS(inode, es) +); + +TRACE_EVENT(ext4_es_remove_extent, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len), + + TP_ARGS(inode, lblk, len), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, lblk ) + __field( loff_t, len ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + __entry->len = len; + ), + + TP_printk("dev %d,%d ino %lu es [%lld/%lld)", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len) +); + +TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk), + + TP_ARGS(inode, lblk), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + ), + + TP_printk("dev %d,%d ino %lu lblk %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->lblk) +); + +TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, + TP_PROTO(struct inode *inode, struct extent_status *es), + + TP_ARGS(inode, es), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( ext4_lblk_t, len ) + __field( ext4_fsblk_t, pblk ) + __field( char, status ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = es->es_lblk; + __entry->len = es->es_len; + __entry->pblk = ext4_es_pblock(es); + __entry->status = ext4_es_status(es); + ), + + TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len, + __entry->pblk, show_extent_status(__entry->status)) +); + 
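The extent-status events are declared in enter/exit pairs that bracket a single operation on the per-inode extent status tree. A minimal sketch of how the lookup pair defined next is typically placed (an assumed function shape for illustration, not the actual fs/ext4 implementation):

	/*
	 * Hypothetical sketch: the *_enter event records the logical block
	 * being looked up, and the *_exit event reports the extent_status
	 * that was found (found == 0 means the lookup missed).
	 */
	static int example_es_lookup(struct inode *inode, ext4_lblk_t lblk,
				     struct extent_status *es)
	{
		int found = 0;

		trace_ext4_es_lookup_extent_enter(inode, lblk);
		/* ... walk the extent status tree, fill *es on a hit ... */
		trace_ext4_es_lookup_extent_exit(inode, es, found);
		return found;
	}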
+TRACE_EVENT(ext4_es_lookup_extent_enter, + TP_PROTO(struct inode *inode, ext4_lblk_t lblk), + + TP_ARGS(inode, lblk), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = lblk; + ), + + TP_printk("dev %d,%d ino %lu lblk %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->lblk) +); + +TRACE_EVENT(ext4_es_lookup_extent_exit, + TP_PROTO(struct inode *inode, struct extent_status *es, + int found), + + TP_ARGS(inode, es, found), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( ext4_lblk_t, len ) + __field( ext4_fsblk_t, pblk ) + __field( char, status ) + __field( int, found ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = es->es_lblk; + __entry->len = es->es_len; + __entry->pblk = ext4_es_pblock(es); + __entry->status = ext4_es_status(es); + __entry->found = found; + ), + + TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->found, + __entry->lblk, __entry->len, + __entry->found ? __entry->pblk : 0, + show_extent_status(__entry->found ? __entry->status : 0)) +); + +DECLARE_EVENT_CLASS(ext4__es_shrink_enter, + TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), + + TP_ARGS(sb, nr_to_scan, cache_cnt), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, nr_to_scan ) + __field( int, cache_cnt ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nr_to_scan = nr_to_scan; + __entry->cache_cnt = cache_cnt; + ), + + TP_printk("dev %d,%d nr_to_scan %d cache_cnt %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_to_scan, __entry->cache_cnt) +); + +DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_count, + TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), + + TP_ARGS(sb, nr_to_scan, cache_cnt) +); + +DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_scan_enter, + TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), + + TP_ARGS(sb, nr_to_scan, cache_cnt) +); + +TRACE_EVENT(ext4_es_shrink_scan_exit, + TP_PROTO(struct super_block *sb, int nr_shrunk, int cache_cnt), + + TP_ARGS(sb, nr_shrunk, cache_cnt), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, nr_shrunk ) + __field( int, cache_cnt ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nr_shrunk = nr_shrunk; + __entry->cache_cnt = cache_cnt; + ), + + TP_printk("dev %d,%d nr_shrunk %d cache_cnt %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_shrunk, __entry->cache_cnt) +); + +TRACE_EVENT(ext4_collapse_range, + TP_PROTO(struct inode *inode, loff_t offset, loff_t len), + + TP_ARGS(inode, offset, len), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, offset) + __field(loff_t, len) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->offset = offset; + __entry->len = len; + ), + + TP_printk("dev %d,%d ino %lu offset %lld len %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->offset, __entry->len) +); + +TRACE_EVENT(ext4_es_shrink, + TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time, + int nr_skipped, int retried), + + TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried), + + TP_STRUCT__entry( + 
__field( dev_t, dev ) + __field( int, nr_shrunk ) + __field( unsigned long long, scan_time ) + __field( int, nr_skipped ) + __field( int, retried ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nr_shrunk = nr_shrunk; + __entry->scan_time = div_u64(scan_time, 1000); + __entry->nr_skipped = nr_skipped; + __entry->retried = retried; + ), + + TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu " + "nr_skipped %d retried %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk, + __entry->scan_time, __entry->nr_skipped, __entry->retried) +); + +#endif /* _TRACE_EXT4_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/f2fs.h b/kernel/include/trace/events/f2fs.h new file mode 100644 index 000000000..e202dec22 --- /dev/null +++ b/kernel/include/trace/events/f2fs.h @@ -0,0 +1,1207 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM f2fs + +#if !defined(_TRACE_F2FS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_F2FS_H + +#include <linux/tracepoint.h> + +#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev) +#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino + +TRACE_DEFINE_ENUM(NODE); +TRACE_DEFINE_ENUM(DATA); +TRACE_DEFINE_ENUM(META); +TRACE_DEFINE_ENUM(META_FLUSH); +TRACE_DEFINE_ENUM(CURSEG_HOT_DATA); +TRACE_DEFINE_ENUM(CURSEG_WARM_DATA); +TRACE_DEFINE_ENUM(CURSEG_COLD_DATA); +TRACE_DEFINE_ENUM(CURSEG_HOT_NODE); +TRACE_DEFINE_ENUM(CURSEG_WARM_NODE); +TRACE_DEFINE_ENUM(CURSEG_COLD_NODE); +TRACE_DEFINE_ENUM(NO_CHECK_TYPE); +TRACE_DEFINE_ENUM(GC_GREEDY); +TRACE_DEFINE_ENUM(GC_CB); +TRACE_DEFINE_ENUM(FG_GC); +TRACE_DEFINE_ENUM(BG_GC); +TRACE_DEFINE_ENUM(LFS); +TRACE_DEFINE_ENUM(SSR); +TRACE_DEFINE_ENUM(__REQ_RAHEAD); +TRACE_DEFINE_ENUM(__REQ_WRITE); +TRACE_DEFINE_ENUM(__REQ_SYNC); +TRACE_DEFINE_ENUM(__REQ_NOIDLE); +TRACE_DEFINE_ENUM(__REQ_FLUSH); +TRACE_DEFINE_ENUM(__REQ_FUA); +TRACE_DEFINE_ENUM(__REQ_PRIO); +TRACE_DEFINE_ENUM(__REQ_META); +TRACE_DEFINE_ENUM(CP_UMOUNT); +TRACE_DEFINE_ENUM(CP_FASTBOOT); +TRACE_DEFINE_ENUM(CP_SYNC); +TRACE_DEFINE_ENUM(CP_DISCARD); + +#define show_block_type(type) \ + __print_symbolic(type, \ + { NODE, "NODE" }, \ + { DATA, "DATA" }, \ + { META, "META" }, \ + { META_FLUSH, "META_FLUSH" }, \ + { INMEM, "INMEM" }, \ + { INMEM_DROP, "INMEM_DROP" }, \ + { IPU, "IN-PLACE" }, \ + { OPU, "OUT-OF-PLACE" }) + +#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) +#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) + +#define show_bio_type(type) show_bio_base(type), show_bio_extra(type) + +#define show_bio_base(type) \ + __print_symbolic(F2FS_BIO_MASK(type), \ + { READ, "READ" }, \ + { READA, "READAHEAD" }, \ + { READ_SYNC, "READ_SYNC" }, \ + { WRITE, "WRITE" }, \ + { WRITE_SYNC, "WRITE_SYNC" }, \ + { WRITE_FLUSH, "WRITE_FLUSH" }, \ + { WRITE_FUA, "WRITE_FUA" }, \ + { WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" }) + +#define show_bio_extra(type) \ + __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ + { REQ_META, "(M)" }, \ + { REQ_PRIO, "(P)" }, \ + { REQ_META | REQ_PRIO, "(MP)" }, \ + { 0, " \b" }) + +#define show_data_type(type) \ + __print_symbolic(type, \ + { CURSEG_HOT_DATA, "Hot DATA" }, \ + { CURSEG_WARM_DATA, "Warm DATA" }, \ + { CURSEG_COLD_DATA, "Cold DATA" }, \ + { CURSEG_HOT_NODE, "Hot NODE" }, \ + { CURSEG_WARM_NODE, "Warm NODE" }, \ + { CURSEG_COLD_NODE, "Cold NODE" }, \ + { NO_CHECK_TYPE, "No TYPE" }) + +#define show_file_type(type) \ + __print_symbolic(type, \ + { 0, "FILE" }, \ + { 1, "DIR" }) + +#define show_gc_type(type) \ + 
__print_symbolic(type, \ + { FG_GC, "Foreground GC" }, \ + { BG_GC, "Background GC" }) + +#define show_alloc_mode(type) \ + __print_symbolic(type, \ + { LFS, "LFS-mode" }, \ + { SSR, "SSR-mode" }) + +#define show_victim_policy(type) \ + __print_symbolic(type, \ + { GC_GREEDY, "Greedy" }, \ + { GC_CB, "Cost-Benefit" }) + +#define show_cpreason(type) \ + __print_symbolic(type, \ + { CP_UMOUNT, "Umount" }, \ + { CP_FASTBOOT, "Fastboot" }, \ + { CP_SYNC, "Sync" }, \ + { CP_RECOVERY, "Recovery" }, \ + { CP_DISCARD, "Discard" }) + +struct victim_sel_policy; + +DECLARE_EVENT_CLASS(f2fs__inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(ino_t, pino) + __field(umode_t, mode) + __field(loff_t, size) + __field(unsigned int, nlink) + __field(blkcnt_t, blocks) + __field(__u8, advise) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pino = F2FS_I(inode)->i_pino; + __entry->mode = inode->i_mode; + __entry->nlink = inode->i_nlink; + __entry->size = inode->i_size; + __entry->blocks = inode->i_blocks; + __entry->advise = F2FS_I(inode)->i_advise; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, " + "i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x", + show_dev_ino(__entry), + (unsigned long)__entry->pino, + __entry->mode, + __entry->size, + (unsigned int)__entry->nlink, + (unsigned long long)__entry->blocks, + (unsigned char)__entry->advise) +); + +DECLARE_EVENT_CLASS(f2fs__inode_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, ret = %d", + show_dev_ino(__entry), + __entry->ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(f2fs_sync_file_exit, + + TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret), + + TP_ARGS(inode, need_cp, datasync, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, need_cp) + __field(int, datasync) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->need_cp = need_cp; + __entry->datasync = datasync; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, " + "datasync = %d, ret = %d", + show_dev_ino(__entry), + __entry->need_cp ? "needed" : "not needed", + __entry->datasync, + __entry->ret) +); + +TRACE_EVENT(f2fs_sync_fs, + + TP_PROTO(struct super_block *sb, int wait), + + TP_ARGS(sb, wait), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, dirty) + __field(int, wait) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY); + __entry->wait = wait; + ), + + TP_printk("dev = (%d,%d), superblock is %s, wait = %d", + show_dev(__entry), + __entry->dirty ? 
"dirty" : "not dirty", + __entry->wait) +); + +DEFINE_EVENT(f2fs__inode, f2fs_iget, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_iget_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_evict_inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_new_inode, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +TRACE_EVENT(f2fs_unlink_enter, + + TP_PROTO(struct inode *dir, struct dentry *dentry), + + TP_ARGS(dir, dentry), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, size) + __field(blkcnt_t, blocks) + __field(const char *, name) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->size = dir->i_size; + __entry->blocks = dir->i_blocks; + __entry->name = dentry->d_name.name; + ), + + TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " + "i_blocks = %llu, name = %s", + show_dev_ino(__entry), + __entry->size, + (unsigned long long)__entry->blocks, + __entry->name) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_truncate, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(f2fs_truncate_data_blocks_range, + + TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs, int free), + + TP_ARGS(inode, nid, ofs, free), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(nid_t, nid) + __field(unsigned int, ofs) + __field(int, free) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nid = nid; + __entry->ofs = ofs; + __entry->free = free; + ), + + TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d", + show_dev_ino(__entry), + (unsigned int)__entry->nid, + __entry->ofs, + __entry->free) +); + +DECLARE_EVENT_CLASS(f2fs__truncate_op, + + TP_PROTO(struct inode *inode, u64 from), + + TP_ARGS(inode, from), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, size) + __field(blkcnt_t, blocks) + __field(u64, from) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->size = inode->i_size; + __entry->blocks = inode->i_blocks; + __entry->from = from; + ), + + TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, " + "start file offset = %llu", + show_dev_ino(__entry), + __entry->size, + (unsigned long long)__entry->blocks, + (unsigned long long)__entry->from) +); + +DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_blocks_enter, + + TP_PROTO(struct inode *inode, u64 from), + + TP_ARGS(inode, from) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_blocks_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_inode_blocks_enter, + + TP_PROTO(struct inode *inode, u64 from), + + TP_ARGS(inode, from) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_inode_blocks_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DECLARE_EVENT_CLASS(f2fs__truncate_node, + + TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + + TP_ARGS(inode, nid, blk_addr), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(nid_t, nid) + __field(block_t, blk_addr) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + 
__entry->ino = inode->i_ino; + __entry->nid = nid; + __entry->blk_addr = blk_addr; + ), + + TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx", + show_dev_ino(__entry), + (unsigned int)__entry->nid, + (unsigned long long)__entry->blk_addr) +); + +DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_nodes_enter, + + TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + + TP_ARGS(inode, nid, blk_addr) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_nodes_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node, + + TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + + TP_ARGS(inode, nid, blk_addr) +); + +TRACE_EVENT(f2fs_truncate_partial_nodes, + + TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err), + + TP_ARGS(inode, nid, depth, err), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(nid_t, nid[3]) + __field(int, depth) + __field(int, err) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nid[0] = nid[0]; + __entry->nid[1] = nid[1]; + __entry->nid[2] = nid[2]; + __entry->depth = depth; + __entry->err = err; + ), + + TP_printk("dev = (%d,%d), ino = %lu, " + "nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d", + show_dev_ino(__entry), + (unsigned int)__entry->nid[0], + (unsigned int)__entry->nid[1], + (unsigned int)__entry->nid[2], + __entry->depth, + __entry->err) +); + +TRACE_EVENT(f2fs_get_data_block, + TP_PROTO(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int ret), + + TP_ARGS(inode, iblock, bh, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(sector_t, iblock) + __field(sector_t, bh_start) + __field(size_t, bh_size) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->iblock = iblock; + __entry->bh_start = bh->b_blocknr; + __entry->bh_size = bh->b_size; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, " + "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d", + show_dev_ino(__entry), + (unsigned long long)__entry->iblock, + (unsigned long long)__entry->bh_start, + (unsigned long long)__entry->bh_size, + __entry->ret) +); + +TRACE_EVENT(f2fs_get_victim, + + TP_PROTO(struct super_block *sb, int type, int gc_type, + struct victim_sel_policy *p, unsigned int pre_victim, + unsigned int prefree, unsigned int free), + + TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, type) + __field(int, gc_type) + __field(int, alloc_mode) + __field(int, gc_mode) + __field(unsigned int, victim) + __field(unsigned int, ofs_unit) + __field(unsigned int, pre_victim) + __field(unsigned int, prefree) + __field(unsigned int, free) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->type = type; + __entry->gc_type = gc_type; + __entry->alloc_mode = p->alloc_mode; + __entry->gc_mode = p->gc_mode; + __entry->victim = p->min_segno; + __entry->ofs_unit = p->ofs_unit; + __entry->pre_victim = pre_victim; + __entry->prefree = prefree; + __entry->free = free; + ), + + TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u " + "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u", + show_dev(__entry), + show_data_type(__entry->type), + show_gc_type(__entry->gc_type), + show_alloc_mode(__entry->alloc_mode), + show_victim_policy(__entry->gc_mode), + 
__entry->victim, + __entry->ofs_unit, + (int)__entry->pre_victim, + __entry->prefree, + __entry->free) +); + +TRACE_EVENT(f2fs_fallocate, + + TP_PROTO(struct inode *inode, int mode, + loff_t offset, loff_t len, int ret), + + TP_ARGS(inode, mode, offset, len, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, mode) + __field(loff_t, offset) + __field(loff_t, len) + __field(loff_t, size) + __field(blkcnt_t, blocks) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = mode; + __entry->offset = offset; + __entry->len = len; + __entry->size = inode->i_size; + __entry->blocks = inode->i_blocks; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, " + "len = %lld, i_size = %lld, i_blocks = %llu, ret = %d", + show_dev_ino(__entry), + __entry->mode, + (unsigned long long)__entry->offset, + (unsigned long long)__entry->len, + (unsigned long long)__entry->size, + (unsigned long long)__entry->blocks, + __entry->ret) +); + +TRACE_EVENT(f2fs_direct_IO_enter, + + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), + + TP_ARGS(inode, offset, len, rw), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, pos) + __field(unsigned long, len) + __field(int, rw) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + ), + + TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu rw = %d", + show_dev_ino(__entry), + __entry->pos, + __entry->len, + __entry->rw) +); + +TRACE_EVENT(f2fs_direct_IO_exit, + + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, + int rw, int ret), + + TP_ARGS(inode, offset, len, rw, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, pos) + __field(unsigned long, len) + __field(int, rw) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = offset; + __entry->len = len; + __entry->rw = rw; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu " + "rw = %d ret = %d", + show_dev_ino(__entry), + __entry->pos, + __entry->len, + __entry->rw, + __entry->ret) +); + +TRACE_EVENT(f2fs_reserve_new_block, + + TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node), + + TP_ARGS(inode, nid, ofs_in_node), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(nid_t, nid) + __field(unsigned int, ofs_in_node) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = nid; + __entry->ofs_in_node = ofs_in_node; + ), + + TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u", + show_dev(__entry), + (unsigned int)__entry->nid, + __entry->ofs_in_node) +); + +DECLARE_EVENT_CLASS(f2fs__submit_page_bio, + + TP_PROTO(struct page *page, struct f2fs_io_info *fio), + + TP_ARGS(page, fio), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(pgoff_t, index) + __field(block_t, blkaddr) + __field(int, rw) + __field(int, type) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->index = page->index; + __entry->blkaddr = fio->blk_addr; + __entry->rw = fio->rw; + __entry->type = fio->type; + ), + + TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " + "blkaddr = 0x%llx, rw = %s%s, type = %s", + show_dev_ino(__entry), + 
(unsigned long)__entry->index, + (unsigned long long)__entry->blkaddr, + show_bio_type(__entry->rw), + show_block_type(__entry->type)) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio, + + TP_PROTO(struct page *page, struct f2fs_io_info *fio), + + TP_ARGS(page, fio), + + TP_CONDITION(page->mapping) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio, + + TP_PROTO(struct page *page, struct f2fs_io_info *fio), + + TP_ARGS(page, fio), + + TP_CONDITION(page->mapping) +); + +DECLARE_EVENT_CLASS(f2fs__submit_bio, + + TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, + struct bio *bio), + + TP_ARGS(sb, fio, bio), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, rw) + __field(int, type) + __field(sector_t, sector) + __field(unsigned int, size) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->rw = fio->rw; + __entry->type = fio->type; + __entry->sector = bio->bi_iter.bi_sector; + __entry->size = bio->bi_iter.bi_size; + ), + + TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u", + show_dev(__entry), + show_bio_type(__entry->rw), + show_block_type(__entry->type), + (unsigned long long)__entry->sector, + __entry->size) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, + + TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, + struct bio *bio), + + TP_ARGS(sb, fio, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, + + TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, + struct bio *bio), + + TP_ARGS(sb, fio, bio), + + TP_CONDITION(bio) +); + +TRACE_EVENT(f2fs_write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, pos) + __field(unsigned int, len) + __field(unsigned int, flags) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u", + show_dev_ino(__entry), + (unsigned long long)__entry->pos, + __entry->len, + __entry->flags) +); + +TRACE_EVENT(f2fs_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, pos) + __field(unsigned int, len) + __field(unsigned int, copied) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->len = len; + __entry->copied = copied; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, copied = %u", + show_dev_ino(__entry), + (unsigned long long)__entry->pos, + __entry->len, + __entry->copied) +); + +DECLARE_EVENT_CLASS(f2fs__page, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, type) + __field(int, dir) + __field(pgoff_t, index) + __field(int, dirty) + __field(int, uptodate) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->type = type; + __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->index = page->index; + __entry->dirty = PageDirty(page); + __entry->uptodate = PageUptodate(page); + ), + + TP_printk("dev = 
(%d,%d), ino = %lu, %s, %s, index = %lu, " + "dirty = %d, uptodate = %d", + show_dev_ino(__entry), + show_block_type(__entry->type), + show_file_type(__entry->dir), + (unsigned long)__entry->index, + __entry->dirty, + __entry->uptodate) +); + +DEFINE_EVENT(f2fs__page, f2fs_writepage, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_readpage, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +TRACE_EVENT(f2fs_writepages, + + TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type), + + TP_ARGS(inode, wbc, type), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, type) + __field(int, dir) + __field(long, nr_to_write) + __field(long, pages_skipped) + __field(loff_t, range_start) + __field(loff_t, range_end) + __field(pgoff_t, writeback_index) + __field(int, sync_mode) + __field(char, for_kupdate) + __field(char, for_background) + __field(char, tagged_writepages) + __field(char, for_reclaim) + __field(char, range_cyclic) + __field(char, for_sync) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->type = type; + __entry->dir = S_ISDIR(inode->i_mode); + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->range_start = wbc->range_start; + __entry->range_end = wbc->range_end; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->sync_mode = wbc->sync_mode; + __entry->for_kupdate = wbc->for_kupdate; + __entry->for_background = wbc->for_background; + __entry->tagged_writepages = wbc->tagged_writepages; + __entry->for_reclaim = wbc->for_reclaim; + __entry->range_cyclic = wbc->range_cyclic; + __entry->for_sync = wbc->for_sync; + ), + + TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, " + "skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, " + "kupdate %u background %u tagged %u reclaim %u cyclic %u sync %u", + show_dev_ino(__entry), + show_block_type(__entry->type), + show_file_type(__entry->dir), + __entry->nr_to_write, + __entry->pages_skipped, + __entry->range_start, + __entry->range_end, + (unsigned long)__entry->writeback_index, + __entry->sync_mode, + __entry->for_kupdate, + __entry->for_background, + __entry->tagged_writepages, + __entry->for_reclaim, + __entry->range_cyclic, + __entry->for_sync) +); + +TRACE_EVENT(f2fs_write_checkpoint, + + TP_PROTO(struct super_block *sb, int reason, char *msg), + + TP_ARGS(sb, reason, msg), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, reason) + __field(char *, msg) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->reason = reason; + __entry->msg = msg; + ), + + TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", + show_dev(__entry), + show_cpreason(__entry->reason), + __entry->msg) +); + +TRACE_EVENT(f2fs_issue_discard, + + TP_PROTO(struct super_block *sb, 
block_t blkstart, block_t blklen), + + TP_ARGS(sb, blkstart, blklen), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(block_t, blkstart) + __field(block_t, blklen) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->blkstart = blkstart; + __entry->blklen = blklen; + ), + + TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", + show_dev(__entry), + (unsigned long long)__entry->blkstart, + (unsigned long long)__entry->blklen) +); + +TRACE_EVENT(f2fs_issue_flush, + + TP_PROTO(struct super_block *sb, unsigned int nobarrier, + unsigned int flush_merge), + + TP_ARGS(sb, nobarrier, flush_merge), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, nobarrier) + __field(unsigned int, flush_merge) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nobarrier = nobarrier; + __entry->flush_merge = flush_merge; + ), + + TP_printk("dev = (%d,%d), %s %s", + show_dev(__entry), + __entry->nobarrier ? "skip (nobarrier)" : "issue", + __entry->flush_merge ? " with flush_merge" : "") +); + +TRACE_EVENT(f2fs_lookup_extent_tree_start, + + TP_PROTO(struct inode *inode, unsigned int pgofs), + + TP_ARGS(inode, pgofs), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u", + show_dev_ino(__entry), + __entry->pgofs) +); + +TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, + + TP_PROTO(struct inode *inode, unsigned int pgofs, + struct extent_node *en), + + TP_ARGS(inode, pgofs, en), + + TP_CONDITION(en), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + __field(unsigned int, fofs) + __field(u32, blk) + __field(unsigned int, len) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + __entry->fofs = en->ei.fofs; + __entry->blk = en->ei.blk; + __entry->len = en->ei.len; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " + "ext_info(fofs: %u, blk: %u, len: %u)", + show_dev_ino(__entry), + __entry->pgofs, + __entry->fofs, + __entry->blk, + __entry->len) +); + +TRACE_EVENT(f2fs_update_extent_tree, + + TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr), + + TP_ARGS(inode, pgofs, blkaddr), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + __field(u32, blk) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + __entry->blk = blkaddr; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u", + show_dev_ino(__entry), + __entry->pgofs, + __entry->blk) +); + +TRACE_EVENT(f2fs_shrink_extent_tree, + + TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt, + unsigned int tree_cnt), + + TP_ARGS(sbi, node_cnt, tree_cnt), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, node_cnt) + __field(unsigned int, tree_cnt) + ), + + TP_fast_assign( + __entry->dev = sbi->sb->s_dev; + __entry->node_cnt = node_cnt; + __entry->tree_cnt = tree_cnt; + ), + + TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u", + show_dev(__entry), + __entry->node_cnt, + __entry->tree_cnt) +); + +TRACE_EVENT(f2fs_destroy_extent_tree, + + TP_PROTO(struct inode *inode, unsigned int node_cnt), + + TP_ARGS(inode, node_cnt), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + 
__field(unsigned int, node_cnt) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->node_cnt = node_cnt; + ), + + TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u", + show_dev_ino(__entry), + __entry->node_cnt) +); + +#endif /* _TRACE_F2FS_H */ + + /* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/fence.h b/kernel/include/trace/events/fence.h new file mode 100644 index 000000000..98feb1b82 --- /dev/null +++ b/kernel/include/trace/events/fence.h @@ -0,0 +1,128 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fence + +#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FENCE_H + +#include <linux/tracepoint.h> + +struct fence; + +TRACE_EVENT(fence_annotate_wait_on, + + /* fence: the fence waiting on f1, f1: the fence to be waited on. */ + TP_PROTO(struct fence *fence, struct fence *f1), + + TP_ARGS(fence, f1), + + TP_STRUCT__entry( + __string(driver, fence->ops->get_driver_name(fence)) + __string(timeline, fence->ops->get_timeline_name(fence)) + __field(unsigned int, context) + __field(unsigned int, seqno) + + __string(waiting_driver, f1->ops->get_driver_name(f1)) + __string(waiting_timeline, f1->ops->get_timeline_name(f1)) + __field(unsigned int, waiting_context) + __field(unsigned int, waiting_seqno) + ), + + TP_fast_assign( + __assign_str(driver, fence->ops->get_driver_name(fence)) + __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __entry->context = fence->context; + __entry->seqno = fence->seqno; + + __assign_str(waiting_driver, f1->ops->get_driver_name(f1)) + __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1)) + __entry->waiting_context = f1->context; + __entry->waiting_seqno = f1->seqno; + + ), + + TP_printk("driver=%s timeline=%s context=%u seqno=%u " \ + "waits on driver=%s timeline=%s context=%u seqno=%u", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->seqno, + __get_str(waiting_driver), __get_str(waiting_timeline), + __entry->waiting_context, __entry->waiting_seqno) +); + +DECLARE_EVENT_CLASS(fence, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence), + + TP_STRUCT__entry( + __string(driver, fence->ops->get_driver_name(fence)) + __string(timeline, fence->ops->get_timeline_name(fence)) + __field(unsigned int, context) + __field(unsigned int, seqno) + ), + + TP_fast_assign( + __assign_str(driver, fence->ops->get_driver_name(fence)) + __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __entry->context = fence->context; + __entry->seqno = fence->seqno; + ), + + TP_printk("driver=%s timeline=%s context=%u seqno=%u", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->seqno) +); + +DEFINE_EVENT(fence, fence_emit, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_init, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_destroy, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_enable_signal, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_signaled, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_wait_start, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(fence, fence_wait_end, + + TP_PROTO(struct fence *fence), + + TP_ARGS(fence) +); + +#endif /* _TRACE_FENCE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h>
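A minimal sketch of how the fence events above are emitted (assumptions: the helper and its placement in a driver .c file are hypothetical; only the trace_<event>() stubs generated from the DEFINE_EVENT()s are taken from the header):

	#define CREATE_TRACE_POINTS
	#include <trace/events/fence.h>

	/* every DEFINE_EVENT(fence, ...) above yields one stub sharing
	 * the "fence" class record layout */
	static void example_signal(struct fence *f)
	{
		trace_fence_enable_signal(f);
		trace_fence_signaled(f);
	}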
diff --git a/kernel/include/trace/events/filelock.h b/kernel/include/trace/events/filelock.h new file mode 100644 index 000000000..a0d008070 --- /dev/null +++ b/kernel/include/trace/events/filelock.h @@ -0,0 +1,96 @@ +/* + * Events for filesystem locks + * + * Copyright 2013 Jeff Layton <jlayton@poochiereds.net> + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM filelock + +#if !defined(_TRACE_FILELOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FILELOCK_H + +#include <linux/tracepoint.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/kdev_t.h> + +#define show_fl_flags(val) \ + __print_flags(val, "|", \ + { FL_POSIX, "FL_POSIX" }, \ + { FL_FLOCK, "FL_FLOCK" }, \ + { FL_DELEG, "FL_DELEG" }, \ + { FL_ACCESS, "FL_ACCESS" }, \ + { FL_EXISTS, "FL_EXISTS" }, \ + { FL_LEASE, "FL_LEASE" }, \ + { FL_CLOSE, "FL_CLOSE" }, \ + { FL_SLEEP, "FL_SLEEP" }, \ + { FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \ + { FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \ + { FL_OFDLCK, "FL_OFDLCK" }) + +#define show_fl_type(val) \ + __print_symbolic(val, \ + { F_RDLCK, "F_RDLCK" }, \ + { F_WRLCK, "F_WRLCK" }, \ + { F_UNLCK, "F_UNLCK" }) + +DECLARE_EVENT_CLASS(filelock_lease, + + TP_PROTO(struct inode *inode, struct file_lock *fl), + + TP_ARGS(inode, fl), + + TP_STRUCT__entry( + __field(struct file_lock *, fl) + __field(unsigned long, i_ino) + __field(dev_t, s_dev) + __field(struct file_lock *, fl_next) + __field(fl_owner_t, fl_owner) + __field(unsigned int, fl_flags) + __field(unsigned char, fl_type) + __field(unsigned long, fl_break_time) + __field(unsigned long, fl_downgrade_time) + ), + + TP_fast_assign( + __entry->fl = fl ? fl : NULL; + __entry->s_dev = inode->i_sb->s_dev; + __entry->i_ino = inode->i_ino; + __entry->fl_next = fl ? fl->fl_next : NULL; + __entry->fl_owner = fl ? fl->fl_owner : NULL; + __entry->fl_flags = fl ? fl->fl_flags : 0; + __entry->fl_type = fl ? fl->fl_type : 0; + __entry->fl_break_time = fl ? fl->fl_break_time : 0; + __entry->fl_downgrade_time = fl ? 
fl->fl_downgrade_time : 0; + ), + + TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", + __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), + __entry->i_ino, __entry->fl_next, __entry->fl_owner, + show_fl_flags(__entry->fl_flags), + show_fl_type(__entry->fl_type), + __entry->fl_break_time, __entry->fl_downgrade_time) +); + +DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, generic_add_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_ARGS(inode, fl)); + +#endif /* _TRACE_FILELOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/filemap.h b/kernel/include/trace/events/filemap.h new file mode 100644 index 000000000..42febb6bc --- /dev/null +++ b/kernel/include/trace/events/filemap.h @@ -0,0 +1,58 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM filemap + +#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FILEMAP_H + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <linux/mm.h> +#include <linux/memcontrol.h> +#include <linux/device.h> +#include <linux/kdev_t.h> + +DECLARE_EVENT_CLASS(mm_filemap_op_page_cache, + + TP_PROTO(struct page *page), + + TP_ARGS(page), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned long, i_ino) + __field(unsigned long, index) + __field(dev_t, s_dev) + ), + + TP_fast_assign( + __entry->pfn = page_to_pfn(page); + __entry->i_ino = page->mapping->host->i_ino; + __entry->index = page->index; + if (page->mapping->host->i_sb) + __entry->s_dev = page->mapping->host->i_sb->s_dev; + else + __entry->s_dev = page->mapping->host->i_rdev; + ), + + TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu", + MAJOR(__entry->s_dev), MINOR(__entry->s_dev), + __entry->i_ino, + pfn_to_page(__entry->pfn), + __entry->pfn, + __entry->index << PAGE_SHIFT) +); + +DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache, + TP_PROTO(struct page *page), + TP_ARGS(page) + ); + +DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache, + TP_PROTO(struct page *page), + TP_ARGS(page) + ); + +#endif /* _TRACE_FILEMAP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/gfpflags.h b/kernel/include/trace/events/gfpflags.h new file mode 100644 index 000000000..d6fd8e5b1 --- /dev/null +++ b/kernel/include/trace/events/gfpflags.h @@ -0,0 +1,42 @@ +/* + * The order of these masks is important. Matching masks will be seen + * first and the left over flags will end up showing by themselves. + * + * For example, if we have GFP_KERNEL before GFP_USER we will get: + * + * GFP_KERNEL|GFP_HARDWALL + * + * Thus most bits set go first. + */ +#define show_gfp_flags(flags) \ + (flags) ?
__print_flags(flags, "|", \ + {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ + {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ + {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ + {(unsigned long)GFP_USER, "GFP_USER"}, \ + {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ + {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ + {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ + {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ + {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ + {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ + {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \ + {(unsigned long)__GFP_IO, "GFP_IO"}, \ + {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ + {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ + {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ + {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ + {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ + {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ + {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ + {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ + {(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \ + {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ + {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ + {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ + {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ + {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ + {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ + {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ + ) : "GFP_NOWAIT" + diff --git a/kernel/include/trace/events/gpio.h b/kernel/include/trace/events/gpio.h new file mode 100644 index 000000000..927a8ad9e --- /dev/null +++ b/kernel/include/trace/events/gpio.h @@ -0,0 +1,56 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpio + +#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_GPIO_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(gpio_direction, + + TP_PROTO(unsigned gpio, int in, int err), + + TP_ARGS(gpio, in, err), + + TP_STRUCT__entry( + __field(unsigned, gpio) + __field(int, in) + __field(int, err) + ), + + TP_fast_assign( + __entry->gpio = gpio; + __entry->in = in; + __entry->err = err; + ), + + TP_printk("%u %3s (%d)", __entry->gpio, + __entry->in ? "in" : "out", __entry->err) +); + +TRACE_EVENT(gpio_value, + + TP_PROTO(unsigned gpio, int get, int value), + + TP_ARGS(gpio, get, value), + + TP_STRUCT__entry( + __field(unsigned, gpio) + __field(int, get) + __field(int, value) + ), + + TP_fast_assign( + __entry->gpio = gpio; + __entry->get = get; + __entry->value = value; + ), + + TP_printk("%u %3s %d", __entry->gpio, + __entry->get ? 
"get" : "set", __entry->value) +); + +#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/hist.h b/kernel/include/trace/events/hist.h new file mode 100644 index 000000000..6122e4286 --- /dev/null +++ b/kernel/include/trace/events/hist.h @@ -0,0 +1,72 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hist + +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HIST_H + +#include "latency_hist.h" +#include <linux/tracepoint.h> + +#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) +#define trace_preemptirqsoff_hist(a, b) +#else +TRACE_EVENT(preemptirqsoff_hist, + + TP_PROTO(int reason, int starthist), + + TP_ARGS(reason, starthist), + + TP_STRUCT__entry( + __field(int, reason) + __field(int, starthist) + ), + + TP_fast_assign( + __entry->reason = reason; + __entry->starthist = starthist; + ), + + TP_printk("reason=%s starthist=%s", getaction(__entry->reason), + __entry->starthist ? "start" : "stop") +); +#endif + +#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST +#define trace_hrtimer_interrupt(a, b, c, d) +#else +TRACE_EVENT(hrtimer_interrupt, + + TP_PROTO(int cpu, long long offset, struct task_struct *curr, + struct task_struct *task), + + TP_ARGS(cpu, offset, curr, task), + + TP_STRUCT__entry( + __field(int, cpu) + __field(long long, offset) + __array(char, ccomm, TASK_COMM_LEN) + __field(int, cprio) + __array(char, tcomm, TASK_COMM_LEN) + __field(int, tprio) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->offset = offset; + memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); + __entry->cprio = curr->prio; + memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", + task != NULL ? TASK_COMM_LEN : 7); + __entry->tprio = task != NULL ? task->prio : -1; + ), + + TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", + __entry->cpu, __entry->offset, __entry->ccomm, + __entry->cprio, __entry->tcomm, __entry->tprio) +); +#endif + +#endif /* _TRACE_HIST_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/host1x.h b/kernel/include/trace/events/host1x.h new file mode 100644 index 000000000..631163625 --- /dev/null +++ b/kernel/include/trace/events/host1x.h @@ -0,0 +1,256 @@ +/* + * include/trace/events/host1x.h + * + * host1x event logging to ftrace. + * + * Copyright (c) 2010-2013, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM host1x + +#if !defined(_TRACE_HOST1X_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HOST1X_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +struct host1x_bo; + +DECLARE_EVENT_CLASS(host1x, + TP_PROTO(const char *name), + TP_ARGS(name), + TP_STRUCT__entry(__field(const char *, name)), + TP_fast_assign(__entry->name = name;), + TP_printk("name=%s", __entry->name) +); + +DEFINE_EVENT(host1x, host1x_channel_open, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +DEFINE_EVENT(host1x, host1x_channel_release, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +DEFINE_EVENT(host1x, host1x_cdma_begin, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +DEFINE_EVENT(host1x, host1x_cdma_end, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +TRACE_EVENT(host1x_cdma_push, + TP_PROTO(const char *name, u32 op1, u32 op2), + + TP_ARGS(name, op1, op2), + + TP_STRUCT__entry( + __field(const char *, name) + __field(u32, op1) + __field(u32, op2) + ), + + TP_fast_assign( + __entry->name = name; + __entry->op1 = op1; + __entry->op2 = op2; + ), + + TP_printk("name=%s, op1=%08x, op2=%08x", + __entry->name, __entry->op1, __entry->op2) +); + +TRACE_EVENT(host1x_cdma_push_gather, + TP_PROTO(const char *name, struct host1x_bo *bo, + u32 words, u32 offset, void *cmdbuf), + + TP_ARGS(name, bo, words, offset, cmdbuf), + + TP_STRUCT__entry( + __field(const char *, name) + __field(struct host1x_bo *, bo) + __field(u32, words) + __field(u32, offset) + __field(bool, cmdbuf) + __dynamic_array(u32, cmdbuf, words) + ), + + TP_fast_assign( + if (cmdbuf) { + memcpy(__get_dynamic_array(cmdbuf), cmdbuf+offset, + words * sizeof(u32)); + } + __entry->cmdbuf = cmdbuf; + __entry->name = name; + __entry->bo = bo; + __entry->words = words; + __entry->offset = offset; + ), + + TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]", + __entry->name, __entry->bo, + __entry->words, __entry->offset, + __print_hex(__get_dynamic_array(cmdbuf), + __entry->cmdbuf ? 
__entry->words * 4 : 0)) +); + +TRACE_EVENT(host1x_channel_submit, + TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks, + u32 syncpt_id, u32 syncpt_incrs), + + TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs), + + TP_STRUCT__entry( + __field(const char *, name) + __field(u32, cmdbufs) + __field(u32, relocs) + __field(u32, waitchks) + __field(u32, syncpt_id) + __field(u32, syncpt_incrs) + ), + + TP_fast_assign( + __entry->name = name; + __entry->cmdbufs = cmdbufs; + __entry->relocs = relocs; + __entry->waitchks = waitchks; + __entry->syncpt_id = syncpt_id; + __entry->syncpt_incrs = syncpt_incrs; + ), + + TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d," + "syncpt_id=%u, syncpt_incrs=%u", + __entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks, + __entry->syncpt_id, __entry->syncpt_incrs) +); + +TRACE_EVENT(host1x_channel_submitted, + TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max), + + TP_ARGS(name, syncpt_base, syncpt_max), + + TP_STRUCT__entry( + __field(const char *, name) + __field(u32, syncpt_base) + __field(u32, syncpt_max) + ), + + TP_fast_assign( + __entry->name = name; + __entry->syncpt_base = syncpt_base; + __entry->syncpt_max = syncpt_max; + ), + + TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d", + __entry->name, __entry->syncpt_base, __entry->syncpt_max) +); + +TRACE_EVENT(host1x_channel_submit_complete, + TP_PROTO(const char *name, int count, u32 thresh), + + TP_ARGS(name, count, thresh), + + TP_STRUCT__entry( + __field(const char *, name) + __field(int, count) + __field(u32, thresh) + ), + + TP_fast_assign( + __entry->name = name; + __entry->count = count; + __entry->thresh = thresh; + ), + + TP_printk("name=%s, count=%d, thresh=%d", + __entry->name, __entry->count, __entry->thresh) +); + +TRACE_EVENT(host1x_wait_cdma, + TP_PROTO(const char *name, u32 eventid), + + TP_ARGS(name, eventid), + + TP_STRUCT__entry( + __field(const char *, name) + __field(u32, eventid) + ), + + TP_fast_assign( + __entry->name = name; + __entry->eventid = eventid; + ), + + TP_printk("name=%s, event=%d", __entry->name, __entry->eventid) +); + +TRACE_EVENT(host1x_syncpt_load_min, + TP_PROTO(u32 id, u32 val), + + TP_ARGS(id, val), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, val) + ), + + TP_fast_assign( + __entry->id = id; + __entry->val = val; + ), + + TP_printk("id=%d, val=%d", __entry->id, __entry->val) +); + +TRACE_EVENT(host1x_syncpt_wait_check, + TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh, + u32 min), + + TP_ARGS(bo, offset, syncpt_id, thresh, min), + + TP_STRUCT__entry( + __field(struct host1x_bo *, bo) + __field(u32, offset) + __field(u32, syncpt_id) + __field(u32, thresh) + __field(u32, min) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->offset = offset; + __entry->syncpt_id = syncpt_id; + __entry->thresh = thresh; + __entry->min = min; + ), + + TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d", + __entry->bo, __entry->offset, + __entry->syncpt_id, __entry->thresh, + __entry->min) +); + +#endif /* _TRACE_HOST1X_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/hswadsp.h b/kernel/include/trace/events/hswadsp.h new file mode 100644 index 000000000..0f78bbb02 --- /dev/null +++ b/kernel/include/trace/events/hswadsp.h @@ -0,0 +1,384 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hswadsp + +#if !defined(_TRACE_HSWADSP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HSWADSP_H + +#include 
<linux/types.h> +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +struct sst_hsw; +struct sst_hsw_stream; +struct sst_hsw_ipc_stream_free_req; +struct sst_hsw_ipc_volume_req; +struct sst_hsw_ipc_stream_alloc_req; +struct sst_hsw_audio_data_format_ipc; +struct sst_hsw_ipc_stream_info_reply; +struct sst_hsw_ipc_device_config_req; + +DECLARE_EVENT_CLASS(sst_irq, + + TP_PROTO(uint32_t status, uint32_t mask), + + TP_ARGS(status, mask), + + TP_STRUCT__entry( + __field( unsigned int, status ) + __field( unsigned int, mask ) + ), + + TP_fast_assign( + __entry->status = status; + __entry->mask = mask; + ), + + TP_printk("status 0x%8.8x mask 0x%8.8x", + (unsigned int)__entry->status, (unsigned int)__entry->mask) +); + +DEFINE_EVENT(sst_irq, sst_irq_busy, + + TP_PROTO(unsigned int status, unsigned int mask), + + TP_ARGS(status, mask) + +); + +DEFINE_EVENT(sst_irq, sst_irq_done, + + TP_PROTO(unsigned int status, unsigned int mask), + + TP_ARGS(status, mask) + +); + +DECLARE_EVENT_CLASS(ipc, + + TP_PROTO(const char *name, int val), + + TP_ARGS(name, val), + + TP_STRUCT__entry( + __string( name, name ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->val = val; + ), + + TP_printk("%s 0x%8.8x", __get_str(name), (unsigned int)__entry->val) + +); + +DEFINE_EVENT(ipc, ipc_request, + + TP_PROTO(const char *name, int val), + + TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_reply, + + TP_PROTO(const char *name, int val), + + TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_pending_reply, + + TP_PROTO(const char *name, int val), + + TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_notification, + + TP_PROTO(const char *name, int val), + + TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_error, + + TP_PROTO(const char *name, int val), + + TP_ARGS(name, val) + +); + +DECLARE_EVENT_CLASS(stream_position, + + TP_PROTO(unsigned int id, unsigned int pos), + + TP_ARGS(id, pos), + + TP_STRUCT__entry( + __field( unsigned int, id ) + __field( unsigned int, pos ) + ), + + TP_fast_assign( + __entry->id = id; + __entry->pos = pos; + ), + + TP_printk("id %d position 0x%x", + (unsigned int)__entry->id, (unsigned int)__entry->pos) +); + +DEFINE_EVENT(stream_position, stream_read_position, + + TP_PROTO(unsigned int id, unsigned int pos), + + TP_ARGS(id, pos) + +); + +DEFINE_EVENT(stream_position, stream_write_position, + + TP_PROTO(unsigned int id, unsigned int pos), + + TP_ARGS(id, pos) + +); + +TRACE_EVENT(hsw_stream_buffer, + + TP_PROTO(struct sst_hsw_stream *stream), + + TP_ARGS(stream), + + TP_STRUCT__entry( + __field( int, id ) + __field( int, pt_addr ) + __field( int, num_pages ) + __field( int, ring_size ) + __field( int, ring_offset ) + __field( int, first_pfn ) + ), + + TP_fast_assign( + __entry->id = stream->host_id; + __entry->pt_addr = stream->request.ringinfo.ring_pt_address; + __entry->num_pages = stream->request.ringinfo.num_pages; + __entry->ring_size = stream->request.ringinfo.ring_size; + __entry->ring_offset = stream->request.ringinfo.ring_offset; + __entry->first_pfn = stream->request.ringinfo.ring_first_pfn; + ), + + TP_printk("stream %d ring addr 0x%x pages %d size 0x%x offset 0x%x PFN 0x%x", + (int) __entry->id, (int)__entry->pt_addr, + (int)__entry->num_pages, (int)__entry->ring_size, + (int)__entry->ring_offset, (int)__entry->first_pfn) +); + +TRACE_EVENT(hsw_stream_alloc_reply, + + TP_PROTO(struct sst_hsw_stream *stream), + + TP_ARGS(stream), + + TP_STRUCT__entry( + __field( int, id ) + __field( int, stream_id ) + __field( int, mixer_id ) + 
__field( int, peak0 ) + __field( int, peak1 ) + __field( int, vol0 ) + __field( int, vol1 ) + ), + + TP_fast_assign( + __entry->id = stream->host_id; + __entry->stream_id = stream->reply.stream_hw_id; + __entry->mixer_id = stream->reply.mixer_hw_id; + __entry->peak0 = stream->reply.peak_meter_register_address[0]; + __entry->peak1 = stream->reply.peak_meter_register_address[1]; + __entry->vol0 = stream->reply.volume_register_address[0]; + __entry->vol1 = stream->reply.volume_register_address[1]; + ), + + TP_printk("stream %d hw id %d mixer %d peak 0x%x:0x%x vol 0x%x,0x%x", + (int) __entry->id, (int) __entry->stream_id, (int)__entry->mixer_id, + (int)__entry->peak0, (int)__entry->peak1, + (int)__entry->vol0, (int)__entry->vol1) +); + +TRACE_EVENT(hsw_mixer_info_reply, + + TP_PROTO(struct sst_hsw_ipc_stream_info_reply *reply), + + TP_ARGS(reply), + + TP_STRUCT__entry( + __field( int, mixer_id ) + __field( int, peak0 ) + __field( int, peak1 ) + __field( int, vol0 ) + __field( int, vol1 ) + ), + + TP_fast_assign( + __entry->mixer_id = reply->mixer_hw_id; + __entry->peak0 = reply->peak_meter_register_address[0]; + __entry->peak1 = reply->peak_meter_register_address[1]; + __entry->vol0 = reply->volume_register_address[0]; + __entry->vol1 = reply->volume_register_address[1]; + ), + + TP_printk("mixer id %d peak 0x%x:0x%x vol 0x%x,0x%x", + (int)__entry->mixer_id, + (int)__entry->peak0, (int)__entry->peak1, + (int)__entry->vol0, (int)__entry->vol1) +); + +TRACE_EVENT(hsw_stream_data_format, + + TP_PROTO(struct sst_hsw_stream *stream, + struct sst_hsw_audio_data_format_ipc *req), + + TP_ARGS(stream, req), + + TP_STRUCT__entry( + __field( uint32_t, id ) + __field( uint32_t, frequency ) + __field( uint32_t, bitdepth ) + __field( uint32_t, map ) + __field( uint32_t, config ) + __field( uint32_t, style ) + __field( uint8_t, ch_num ) + __field( uint8_t, valid_bit ) + ), + + TP_fast_assign( + __entry->id = stream->host_id; + __entry->frequency = req->frequency; + __entry->bitdepth = req->bitdepth; + __entry->map = req->map; + __entry->config = req->config; + __entry->style = req->style; + __entry->ch_num = req->ch_num; + __entry->valid_bit = req->valid_bit; + ), + + TP_printk("stream %d freq %d depth %d map 0x%x config 0x%x style 0x%x ch %d bits %d", + (int) __entry->id, (uint32_t)__entry->frequency, + (uint32_t)__entry->bitdepth, (uint32_t)__entry->map, + (uint32_t)__entry->config, (uint32_t)__entry->style, + (uint8_t)__entry->ch_num, (uint8_t)__entry->valid_bit) +); + +TRACE_EVENT(hsw_stream_alloc_request, + + TP_PROTO(struct sst_hsw_stream *stream, + struct sst_hsw_ipc_stream_alloc_req *req), + + TP_ARGS(stream, req), + + TP_STRUCT__entry( + __field( uint32_t, id ) + __field( uint8_t, path_id ) + __field( uint8_t, stream_type ) + __field( uint8_t, format_id ) + ), + + TP_fast_assign( + __entry->id = stream->host_id; + __entry->path_id = req->path_id; + __entry->stream_type = req->stream_type; + __entry->format_id = req->format_id; + ), + + TP_printk("stream %d path %d type %d format %d", + (int) __entry->id, (uint8_t)__entry->path_id, + (uint8_t)__entry->stream_type, (uint8_t)__entry->format_id) +); + +TRACE_EVENT(hsw_stream_free_req, + + TP_PROTO(struct sst_hsw_stream *stream, + struct sst_hsw_ipc_stream_free_req *req), + + TP_ARGS(stream, req), + + TP_STRUCT__entry( + __field( int, id ) + __field( int, stream_id ) + ), + + TP_fast_assign( + __entry->id = stream->host_id; + __entry->stream_id = req->stream_id; + ), + + TP_printk("stream %d hw id %d", + (int) __entry->id, (int) __entry->stream_id) +); + 
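/*
 * Sketch (illustrative, not from the SST driver): all DEFINE_EVENT()s of
 * the "ipc" class earlier in this header share one record layout, so each
 * IPC phase is logged with a one-line call; the message name and values
 * here are hypothetical:
 *
 *	trace_ipc_request("stream free", stream->host_id);
 *	trace_ipc_reply("stream free", msg->header);
 */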
+TRACE_EVENT(hsw_volume_req, + + TP_PROTO(struct sst_hsw_stream *stream, + struct sst_hsw_ipc_volume_req *req), + + TP_ARGS(stream, req), + + TP_STRUCT__entry( + __field( int, id ) + __field( uint32_t, channel ) + __field( uint32_t, target_volume ) + __field( uint64_t, curve_duration ) + __field( uint32_t, curve_type ) + ), + + TP_fast_assign( + __entry->id = stream->host_id; + __entry->channel = req->channel; + __entry->target_volume = req->target_volume; + __entry->curve_duration = req->curve_duration; + __entry->curve_type = req->curve_type; + ), + + TP_printk("stream %d chan 0x%x vol %d duration %llu type %d", + (int) __entry->id, (uint32_t) __entry->channel, + (uint32_t)__entry->target_volume, + (uint64_t)__entry->curve_duration, + (uint32_t)__entry->curve_type) +); + +TRACE_EVENT(hsw_device_config_req, + + TP_PROTO(struct sst_hsw_ipc_device_config_req *req), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field( uint32_t, ssp ) + __field( uint32_t, clock_freq ) + __field( uint32_t, mode ) + __field( uint16_t, clock_divider ) + ), + + TP_fast_assign( + __entry->ssp = req->ssp_interface; + __entry->clock_freq = req->clock_frequency; + __entry->mode = req->mode; + __entry->clock_divider = req->clock_divider; + ), + + TP_printk("SSP %d Freq %d mode %d div %d", + (uint32_t)__entry->ssp, + (uint32_t)__entry->clock_freq, (uint32_t)__entry->mode, + (uint32_t)__entry->clock_divider) +); + +#endif /* _TRACE_HSWADSP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/i2c.h b/kernel/include/trace/events/i2c.h new file mode 100644 index 000000000..fe17187df --- /dev/null +++ b/kernel/include/trace/events/i2c.h @@ -0,0 +1,372 @@ +/* I2C and SMBUS message transfer tracepoints + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i2c + +#if !defined(_TRACE_I2C_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_I2C_H + +#include <linux/i2c.h> +#include <linux/tracepoint.h> + +/* + * drivers/i2c/i2c-core.c + */ +extern void i2c_transfer_trace_reg(void); +extern void i2c_transfer_trace_unreg(void); + +/* + * __i2c_transfer() write request + */ +TRACE_EVENT_FN(i2c_write, + TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg, + int num), + TP_ARGS(adap, msg, num), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, msg_nr ) + __field(__u16, addr ) + __field(__u16, flags ) + __field(__u16, len ) + __dynamic_array(__u8, buf, msg->len) ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->msg_nr = num; + __entry->addr = msg->addr; + __entry->flags = msg->flags; + __entry->len = msg->len; + memcpy(__get_dynamic_array(buf), msg->buf, msg->len); + ), + TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]", + __entry->adapter_nr, + __entry->msg_nr, + __entry->addr, + __entry->flags, + __entry->len, + __entry->len, __get_dynamic_array(buf) + ), + i2c_transfer_trace_reg, + i2c_transfer_trace_unreg); + +/* + * __i2c_transfer() read request + */ +TRACE_EVENT_FN(i2c_read, + TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg, + int num), + TP_ARGS(adap, msg, num), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, msg_nr ) + __field(__u16, addr ) + __field(__u16, flags ) + __field(__u16, len ) + ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->msg_nr = num; + __entry->addr = msg->addr; + __entry->flags = msg->flags; + __entry->len = msg->len; + ), + TP_printk("i2c-%d #%u a=%03x f=%04x l=%u", + __entry->adapter_nr, + __entry->msg_nr, + __entry->addr, + __entry->flags, + __entry->len + ), + i2c_transfer_trace_reg, + i2c_transfer_trace_unreg); + +/* + * __i2c_transfer() read reply + */ +TRACE_EVENT_FN(i2c_reply, + TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg, + int num), + TP_ARGS(adap, msg, num), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, msg_nr ) + __field(__u16, addr ) + __field(__u16, flags ) + __field(__u16, len ) + __dynamic_array(__u8, buf, msg->len) ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->msg_nr = num; + __entry->addr = msg->addr; + __entry->flags = msg->flags; + __entry->len = msg->len; + memcpy(__get_dynamic_array(buf), msg->buf, msg->len); + ), + TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]", + __entry->adapter_nr, + __entry->msg_nr, + __entry->addr, + __entry->flags, + __entry->len, + __entry->len, __get_dynamic_array(buf) + ), + i2c_transfer_trace_reg, + i2c_transfer_trace_unreg); + +/* + * __i2c_transfer() result + */ +TRACE_EVENT_FN(i2c_result, + TP_PROTO(const struct i2c_adapter *adap, int num, int ret), + TP_ARGS(adap, num, ret), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, nr_msgs ) + __field(__s16, ret ) + ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->nr_msgs = num; + __entry->ret = ret; + ), + TP_printk("i2c-%d n=%u ret=%d", + __entry->adapter_nr, + __entry->nr_msgs, + __entry->ret + ), + i2c_transfer_trace_reg, + i2c_transfer_trace_unreg); + +/* + * i2c_smbus_xfer() write data or procedure call request + */ +TRACE_EVENT_CONDITION(smbus_write, + TP_PROTO(const struct i2c_adapter *adap, + u16 addr, unsigned short flags, + char read_write, u8 command, int protocol, + const union i2c_smbus_data *data), + TP_ARGS(adap, addr, flags, read_write, command, protocol, data), + 
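+	/*
+	 * (Editorial note) TP_CONDITION() below makes this a conditional
+	 * tracepoint: the event is recorded only when the expression is
+	 * true, i.e. for SMBus writes and for the two procedure-call
+	 * protocols, whose requests carry write data as well.
+	 */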
TP_CONDITION(read_write == I2C_SMBUS_WRITE || + protocol == I2C_SMBUS_PROC_CALL || + protocol == I2C_SMBUS_BLOCK_PROC_CALL), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, addr ) + __field(__u16, flags ) + __field(__u8, command ) + __field(__u8, len ) + __field(__u32, protocol ) + __array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->addr = addr; + __entry->flags = flags; + __entry->command = command; + __entry->protocol = protocol; + + switch (protocol) { + case I2C_SMBUS_BYTE_DATA: + __entry->len = 1; + goto copy; + case I2C_SMBUS_WORD_DATA: + case I2C_SMBUS_PROC_CALL: + __entry->len = 2; + goto copy; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_BLOCK_PROC_CALL: + case I2C_SMBUS_I2C_BLOCK_DATA: + __entry->len = data->block[0] + 1; + copy: + memcpy(__entry->buf, data->block, __entry->len); + break; + case I2C_SMBUS_QUICK: + case I2C_SMBUS_BYTE: + case I2C_SMBUS_I2C_BLOCK_BROKEN: + default: + __entry->len = 0; + } + ), + TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]", + __entry->adapter_nr, + __entry->addr, + __entry->flags, + __entry->command, + __print_symbolic(__entry->protocol, + { I2C_SMBUS_QUICK, "QUICK" }, + { I2C_SMBUS_BYTE, "BYTE" }, + { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, + { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, + { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, + { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, + { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, + { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, + { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }), + __entry->len, + __entry->len, __entry->buf + )); + +/* + * i2c_smbus_xfer() read data request + */ +TRACE_EVENT_CONDITION(smbus_read, + TP_PROTO(const struct i2c_adapter *adap, + u16 addr, unsigned short flags, + char read_write, u8 command, int protocol), + TP_ARGS(adap, addr, flags, read_write, command, protocol), + TP_CONDITION(!(read_write == I2C_SMBUS_WRITE || + protocol == I2C_SMBUS_PROC_CALL || + protocol == I2C_SMBUS_BLOCK_PROC_CALL)), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, flags ) + __field(__u16, addr ) + __field(__u8, command ) + __field(__u32, protocol ) + __array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->addr = addr; + __entry->flags = flags; + __entry->command = command; + __entry->protocol = protocol; + ), + TP_printk("i2c-%d a=%03x f=%04x c=%x %s", + __entry->adapter_nr, + __entry->addr, + __entry->flags, + __entry->command, + __print_symbolic(__entry->protocol, + { I2C_SMBUS_QUICK, "QUICK" }, + { I2C_SMBUS_BYTE, "BYTE" }, + { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, + { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, + { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, + { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, + { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, + { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, + { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }) + )); + +/* + * i2c_smbus_xfer() read data or procedure call reply + */ +TRACE_EVENT_CONDITION(smbus_reply, + TP_PROTO(const struct i2c_adapter *adap, + u16 addr, unsigned short flags, + char read_write, u8 command, int protocol, + const union i2c_smbus_data *data), + TP_ARGS(adap, addr, flags, read_write, command, protocol, data), + TP_CONDITION(read_write == I2C_SMBUS_READ), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, addr ) + __field(__u16, flags ) + __field(__u8, command ) + __field(__u8, len ) + __field(__u32, protocol ) + __array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ), + TP_fast_assign( + 
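+		/*
+		 * (Editorial note) As in smbus_write above, block-type
+		 * protocols keep their byte count in data->block[0], so the
+		 * switch below copies block[0] + 1 bytes to keep the length
+		 * prefix together with the payload; fixed-size protocols use
+		 * a constant length instead.
+		 */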
__entry->adapter_nr = adap->nr; + __entry->addr = addr; + __entry->flags = flags; + __entry->command = command; + __entry->protocol = protocol; + + switch (protocol) { + case I2C_SMBUS_BYTE: + case I2C_SMBUS_BYTE_DATA: + __entry->len = 1; + goto copy; + case I2C_SMBUS_WORD_DATA: + case I2C_SMBUS_PROC_CALL: + __entry->len = 2; + goto copy; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_BLOCK_PROC_CALL: + case I2C_SMBUS_I2C_BLOCK_DATA: + __entry->len = data->block[0] + 1; + copy: + memcpy(__entry->buf, data->block, __entry->len); + break; + case I2C_SMBUS_QUICK: + case I2C_SMBUS_I2C_BLOCK_BROKEN: + default: + __entry->len = 0; + } + ), + TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]", + __entry->adapter_nr, + __entry->addr, + __entry->flags, + __entry->command, + __print_symbolic(__entry->protocol, + { I2C_SMBUS_QUICK, "QUICK" }, + { I2C_SMBUS_BYTE, "BYTE" }, + { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, + { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, + { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, + { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, + { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, + { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, + { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }), + __entry->len, + __entry->len, __entry->buf + )); + +/* + * i2c_smbus_xfer() result + */ +TRACE_EVENT(smbus_result, + TP_PROTO(const struct i2c_adapter *adap, + u16 addr, unsigned short flags, + char read_write, u8 command, int protocol, + int res), + TP_ARGS(adap, addr, flags, read_write, command, protocol, res), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(__u16, addr ) + __field(__u16, flags ) + __field(__u8, read_write ) + __field(__u8, command ) + __field(__s16, res ) + __field(__u32, protocol ) + ), + TP_fast_assign( + __entry->adapter_nr = adap->nr; + __entry->addr = addr; + __entry->flags = flags; + __entry->read_write = read_write; + __entry->command = command; + __entry->protocol = protocol; + __entry->res = res; + ), + TP_printk("i2c-%d a=%03x f=%04x c=%x %s %s res=%d", + __entry->adapter_nr, + __entry->addr, + __entry->flags, + __entry->command, + __print_symbolic(__entry->protocol, + { I2C_SMBUS_QUICK, "QUICK" }, + { I2C_SMBUS_BYTE, "BYTE" }, + { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, + { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, + { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, + { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, + { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, + { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, + { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }), + __entry->read_write == I2C_SMBUS_WRITE ? "wr" : "rd", + __entry->res + )); + +#endif /* _TRACE_I2C_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/intel-sst.h b/kernel/include/trace/events/intel-sst.h new file mode 100644 index 000000000..edc24e6de --- /dev/null +++ b/kernel/include/trace/events/intel-sst.h @@ -0,0 +1,155 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM intel-sst + +/* + * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a + * legitimate C variable. It is not exported to user space. 
+ */ +#undef TRACE_SYSTEM_VAR +#define TRACE_SYSTEM_VAR intel_sst + +#if !defined(_TRACE_INTEL_SST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_INTEL_SST_H + +#include <linux/types.h> +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(sst_ipc_msg, + + TP_PROTO(unsigned int val), + + TP_ARGS(val), + + TP_STRUCT__entry( + __field( unsigned int, val ) + ), + + TP_fast_assign( + __entry->val = val; + ), + + TP_printk("0x%8.8x", (unsigned int)__entry->val) +); + +DEFINE_EVENT(sst_ipc_msg, sst_ipc_msg_tx, + + TP_PROTO(unsigned int val), + + TP_ARGS(val) + +); + +DEFINE_EVENT(sst_ipc_msg, sst_ipc_msg_rx, + + TP_PROTO(unsigned int val), + + TP_ARGS(val) + +); + +DECLARE_EVENT_CLASS(sst_ipc_mailbox, + + TP_PROTO(unsigned int offset, unsigned int val), + + TP_ARGS(offset, val), + + TP_STRUCT__entry( + __field( unsigned int, offset ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __entry->offset = offset; + __entry->val = val; + ), + + TP_printk(" 0x%4.4x = 0x%8.8x", + (unsigned int)__entry->offset, (unsigned int)__entry->val) +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_inbox_rdata, + + TP_PROTO(unsigned int offset, unsigned int val), + + TP_ARGS(offset, val) + +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_inbox_wdata, + + TP_PROTO(unsigned int offset, unsigned int val), + + TP_ARGS(offset, val) + +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_outbox_rdata, + + TP_PROTO(unsigned int offset, unsigned int val), + + TP_ARGS(offset, val) + +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_outbox_wdata, + + TP_PROTO(unsigned int offset, unsigned int val), + + TP_ARGS(offset, val) + +); + +DECLARE_EVENT_CLASS(sst_ipc_mailbox_info, + + TP_PROTO(unsigned int size), + + TP_ARGS(size), + + TP_STRUCT__entry( + __field( unsigned int, size ) + ), + + TP_fast_assign( + __entry->size = size; + ), + + TP_printk("Mailbox bytes 0x%8.8x", (unsigned int)__entry->size) +); + +DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_inbox_read, + + TP_PROTO(unsigned int size), + + TP_ARGS(size) + +); + +DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_inbox_write, + + TP_PROTO(unsigned int size), + + TP_ARGS(size) + +); + +DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_outbox_read, + + TP_PROTO(unsigned int size), + + TP_ARGS(size) + +); + +DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_outbox_write, + + TP_PROTO(unsigned int size), + + TP_ARGS(size) + +); + +#endif /* _TRACE_SST_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/iommu.h b/kernel/include/trace/events/iommu.h new file mode 100644 index 000000000..2c7befb10 --- /dev/null +++ b/kernel/include/trace/events/iommu.h @@ -0,0 +1,167 @@ +/* + * iommu trace points + * + * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com> + * + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iommu + +#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IOMMU_H + +#include <linux/tracepoint.h> +#include <linux/pci.h> + +struct device; + +DECLARE_EVENT_CLASS(iommu_group_event, + + TP_PROTO(int group_id, struct device *dev), + + TP_ARGS(group_id, dev), + + TP_STRUCT__entry( + __field(int, gid) + __string(device, dev_name(dev)) + ), + + TP_fast_assign( + __entry->gid = group_id; + __assign_str(device, dev_name(dev)); + ), + + TP_printk("IOMMU: groupID=%d device=%s", + __entry->gid, __get_str(device) + ) +); + +DEFINE_EVENT(iommu_group_event, add_device_to_group, + + TP_PROTO(int group_id, struct device *dev), + + TP_ARGS(group_id, dev) + +); + 
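+/*
+ * (Editorial sketch; caller inferred from the event prototype, not part of
+ * the original patch.)  A DEFINE_EVENT() instance reuses the class's field
+ * layout and print format, so the IOMMU core can emit the event above as
+ * simply as:
+ *
+ *	trace_add_device_to_group(group->id, dev);
+ */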
+DEFINE_EVENT(iommu_group_event, remove_device_from_group, + + TP_PROTO(int group_id, struct device *dev), + + TP_ARGS(group_id, dev) +); + +DECLARE_EVENT_CLASS(iommu_device_event, + + TP_PROTO(struct device *dev), + + TP_ARGS(dev), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + ), + + TP_fast_assign( + __assign_str(device, dev_name(dev)); + ), + + TP_printk("IOMMU: device=%s", __get_str(device) + ) +); + +DEFINE_EVENT(iommu_device_event, attach_device_to_domain, + + TP_PROTO(struct device *dev), + + TP_ARGS(dev) +); + +DEFINE_EVENT(iommu_device_event, detach_device_from_domain, + + TP_PROTO(struct device *dev), + + TP_ARGS(dev) +); + +TRACE_EVENT(map, + + TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), + + TP_ARGS(iova, paddr, size), + + TP_STRUCT__entry( + __field(u64, iova) + __field(u64, paddr) + __field(size_t, size) + ), + + TP_fast_assign( + __entry->iova = iova; + __entry->paddr = paddr; + __entry->size = size; + ), + + TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", + __entry->iova, __entry->paddr, __entry->size + ) +); + +TRACE_EVENT(unmap, + + TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size), + + TP_ARGS(iova, size, unmapped_size), + + TP_STRUCT__entry( + __field(u64, iova) + __field(size_t, size) + __field(size_t, unmapped_size) + ), + + TP_fast_assign( + __entry->iova = iova; + __entry->size = size; + __entry->unmapped_size = unmapped_size; + ), + + TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", + __entry->iova, __entry->size, __entry->unmapped_size + ) +); + +DECLARE_EVENT_CLASS(iommu_error, + + TP_PROTO(struct device *dev, unsigned long iova, int flags), + + TP_ARGS(dev, iova, flags), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __string(driver, dev_driver_string(dev)) + __field(u64, iova) + __field(int, flags) + ), + + TP_fast_assign( + __assign_str(device, dev_name(dev)); + __assign_str(driver, dev_driver_string(dev)); + __entry->iova = iova; + __entry->flags = flags; + ), + + TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x", + __get_str(driver), __get_str(device), + __entry->iova, __entry->flags + ) +); + +DEFINE_EVENT(iommu_error, io_page_fault, + + TP_PROTO(struct device *dev, unsigned long iova, int flags), + + TP_ARGS(dev, iova, flags) +); +#endif /* _TRACE_IOMMU_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/ipi.h b/kernel/include/trace/events/ipi.h new file mode 100644 index 000000000..834a7362a --- /dev/null +++ b/kernel/include/trace/events/ipi.h @@ -0,0 +1,89 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipi + +#if !defined(_TRACE_IPI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IPI_H + +#include <linux/tracepoint.h> + +/** + * ipi_raise - called when a smp cross call is made + * + * @mask: mask of recipient CPUs for the IPI + * @reason: string identifying the IPI purpose + * + * It is necessary for @reason to be a static string declared with + * __tracepoint_string. 
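+ *
+ * (Editorial example; hypothetical call site, names assumed) e.g.:
+ *
+ *	static const char *reason __tracepoint_string = "rescheduling";
+ *	trace_ipi_raise(cpu_online_mask, reason);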
+ */ +TRACE_EVENT(ipi_raise, + + TP_PROTO(const struct cpumask *mask, const char *reason), + + TP_ARGS(mask, reason), + + TP_STRUCT__entry( + __bitmask(target_cpus, nr_cpumask_bits) + __field(const char *, reason) + ), + + TP_fast_assign( + __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits); + __entry->reason = reason; + ), + + TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason) +); + +DECLARE_EVENT_CLASS(ipi_handler, + + TP_PROTO(const char *reason), + + TP_ARGS(reason), + + TP_STRUCT__entry( + __field(const char *, reason) + ), + + TP_fast_assign( + __entry->reason = reason; + ), + + TP_printk("(%s)", __entry->reason) +); + +/** + * ipi_entry - called immediately before the IPI handler + * + * @reason: string identifying the IPI purpose + * + * It is necessary for @reason to be a static string declared with + * __tracepoint_string, ideally the same as used with trace_ipi_raise + * for that IPI. + */ +DEFINE_EVENT(ipi_handler, ipi_entry, + + TP_PROTO(const char *reason), + + TP_ARGS(reason) +); + +/** + * ipi_exit - called immediately after the IPI handler returns + * + * @reason: string identifying the IPI purpose + * + * It is necessary for @reason to be a static string declared with + * __tracepoint_string, ideally the same as used with trace_ipi_raise for + * that IPI. + */ +DEFINE_EVENT(ipi_handler, ipi_exit, + + TP_PROTO(const char *reason), + + TP_ARGS(reason) +); + +#endif /* _TRACE_IPI_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/irq.h b/kernel/include/trace/events/irq.h new file mode 100644 index 000000000..ff8f6c091 --- /dev/null +++ b/kernel/include/trace/events/irq.h @@ -0,0 +1,165 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM irq + +#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IRQ_H + +#include <linux/tracepoint.h> + +struct irqaction; +struct softirq_action; + +#define SOFTIRQ_NAME_LIST \ + softirq_name(HI) \ + softirq_name(TIMER) \ + softirq_name(NET_TX) \ + softirq_name(NET_RX) \ + softirq_name(BLOCK) \ + softirq_name(BLOCK_IOPOLL) \ + softirq_name(TASKLET) \ + softirq_name(SCHED) \ + softirq_name(HRTIMER) \ + softirq_name_end(RCU) + +#undef softirq_name +#undef softirq_name_end + +#define softirq_name(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ); +#define softirq_name_end(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ); + +SOFTIRQ_NAME_LIST + +#undef softirq_name +#undef softirq_name_end + +#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }, +#define softirq_name_end(sirq) { sirq##_SOFTIRQ, #sirq } + +#define show_softirq_name(val) \ + __print_symbolic(val, SOFTIRQ_NAME_LIST) + +/** + * irq_handler_entry - called immediately before the irq action handler + * @irq: irq number + * @action: pointer to struct irqaction + * + * The struct irqaction pointed to by @action contains various + * information about the handler, including the device name, + * @action->name, and the device id, @action->dev_id. When used in + * conjunction with the irq_handler_exit tracepoint, we can figure + * out irq handler latencies. 
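+ *
+ * (Editorial note) Handler run time is then the timestamp delta between
+ * this event and irq_handler_exit for the same @irq on the same cpu.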
+ */
+TRACE_EVENT(irq_handler_entry,
+
+	TP_PROTO(int irq, struct irqaction *action),
+
+	TP_ARGS(irq, action),
+
+	TP_STRUCT__entry(
+		__field( int, irq )
+		__string( name, action->name )
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__assign_str(name, action->name);
+	),
+
+	TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
+);
+
+/**
+ * irq_handler_exit - called immediately after the irq action handler returns
+ * @irq: irq number
+ * @action: pointer to struct irqaction
+ * @ret: return value
+ *
+ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
+ * @action->handler successfully handled this irq. Otherwise, the irq might be
+ * a shared irq line, or the irq was not handled successfully. Can be used in
+ * conjunction with the irq_handler_entry to understand irq handler latencies.
+ */
+TRACE_EVENT(irq_handler_exit,
+
+	TP_PROTO(int irq, struct irqaction *action, int ret),
+
+	TP_ARGS(irq, action, ret),
+
+	TP_STRUCT__entry(
+		__field( int, irq )
+		__field( int, ret )
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__entry->ret = ret;
+	),
+
+	TP_printk("irq=%d ret=%s",
+		  __entry->irq, __entry->ret ? "handled" : "unhandled")
+);
+
+DECLARE_EVENT_CLASS(softirq,
+
+	TP_PROTO(unsigned int vec_nr),
+
+	TP_ARGS(vec_nr),
+
+	TP_STRUCT__entry(
+		__field( unsigned int, vec )
+	),
+
+	TP_fast_assign(
+		__entry->vec = vec_nr;
+	),
+
+	TP_printk("vec=%u [action=%s]", __entry->vec,
+		  show_softirq_name(__entry->vec))
+);
+
+/**
+ * softirq_entry - called immediately before the softirq handler
+ * @vec_nr: softirq vector number
+ *
+ * When used in combination with the softirq_exit tracepoint
+ * we can determine the softirq handler routine.
+ */
+DEFINE_EVENT(softirq, softirq_entry,
+
+	TP_PROTO(unsigned int vec_nr),
+
+	TP_ARGS(vec_nr)
+);
+
+/**
+ * softirq_exit - called immediately after the softirq handler returns
+ * @vec_nr: softirq vector number
+ *
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq handler routine.
+ */
+DEFINE_EVENT(softirq, softirq_exit,
+
+	TP_PROTO(unsigned int vec_nr),
+
+	TP_ARGS(vec_nr)
+);
+
+/**
+ * softirq_raise - called immediately when a softirq is raised
+ * @vec_nr: softirq vector number
+ *
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq raise to run latency.
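+ *
+ * (Editorial note) i.e. the delay between the vector being marked
+ * pending and the matching softirq_entry for the same @vec_nr.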
+ */ +DEFINE_EVENT(softirq, softirq_raise, + + TP_PROTO(unsigned int vec_nr), + + TP_ARGS(vec_nr) +); + +#endif /* _TRACE_IRQ_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/jbd.h b/kernel/include/trace/events/jbd.h new file mode 100644 index 000000000..da6f2591c --- /dev/null +++ b/kernel/include/trace/events/jbd.h @@ -0,0 +1,194 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM jbd + +#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_JBD_H + +#include <linux/jbd.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(jbd_checkpoint, + + TP_PROTO(journal_t *journal, int result), + + TP_ARGS(journal, result), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, result ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->result = result; + ), + + TP_printk("dev %d,%d result %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->result) +); + +DECLARE_EVENT_CLASS(jbd_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_start_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_locking, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_flushing, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_logging, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +TRACE_EVENT(jbd_drop_transaction, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction) +); + +TRACE_EVENT(jbd_end_commit, + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, transaction ) + __field( int, head ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->transaction = commit_transaction->t_tid; + __entry->head = journal->j_tail_sequence; + ), + + TP_printk("dev %d,%d transaction %d head %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->head) +); + +TRACE_EVENT(jbd_do_submit_data, + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + 
__entry->transaction) +); + +TRACE_EVENT(jbd_cleanup_journal_tail, + + TP_PROTO(journal_t *journal, tid_t first_tid, + unsigned long block_nr, unsigned long freed), + + TP_ARGS(journal, first_tid, block_nr, freed), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( tid_t, tail_sequence ) + __field( tid_t, first_tid ) + __field(unsigned long, block_nr ) + __field(unsigned long, freed ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->tail_sequence = journal->j_tail_sequence; + __entry->first_tid = first_tid; + __entry->block_nr = block_nr; + __entry->freed = freed; + ), + + TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tail_sequence, __entry->first_tid, + __entry->block_nr, __entry->freed) +); + +TRACE_EVENT(journal_write_superblock, + TP_PROTO(journal_t *journal, int write_op), + + TP_ARGS(journal, write_op), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, write_op ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->write_op = write_op; + ), + + TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev), + MINOR(__entry->dev), __entry->write_op) +); + +#endif /* _TRACE_JBD_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/jbd2.h b/kernel/include/trace/events/jbd2.h new file mode 100644 index 000000000..c1d1f3eb2 --- /dev/null +++ b/kernel/include/trace/events/jbd2.h @@ -0,0 +1,385 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM jbd2 + +#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_JBD2_H + +#include <linux/jbd2.h> +#include <linux/tracepoint.h> + +struct transaction_chp_stats_s; +struct transaction_run_stats_s; + +TRACE_EVENT(jbd2_checkpoint, + + TP_PROTO(journal_t *journal, int result), + + TP_ARGS(journal, result), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, result ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->result = result; + ), + + TP_printk("dev %d,%d result %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result) +); + +DECLARE_EVENT_CLASS(jbd2_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) + __field( int, transaction ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->sync_commit = commit_transaction->t_synchronous_commit; + __entry->transaction = commit_transaction->t_tid; + ), + + TP_printk("dev %d,%d transaction %d sync %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit) +); + +DEFINE_EVENT(jbd2_commit, jbd2_start_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd2_commit, jbd2_commit_locking, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd2_commit, jbd2_commit_logging, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + 
TP_ARGS(journal, commit_transaction) +); + +TRACE_EVENT(jbd2_end_commit, + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) + __field( int, transaction ) + __field( int, head ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->sync_commit = commit_transaction->t_synchronous_commit; + __entry->transaction = commit_transaction->t_tid; + __entry->head = journal->j_tail_sequence; + ), + + TP_printk("dev %d,%d transaction %d sync %d head %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit, __entry->head) +); + +TRACE_EVENT(jbd2_submit_inode_data, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + ), + + TP_printk("dev %d,%d ino %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino) +); + +TRACE_EVENT(jbd2_handle_start, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int requested_blocks), + + TP_ARGS(dev, tid, type, line_no, requested_blocks), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned int, type ) + __field( unsigned int, line_no ) + __field( int, requested_blocks) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->requested_blocks = requested_blocks; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u " + "requested_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->requested_blocks) +); + +TRACE_EVENT(jbd2_handle_extend, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int buffer_credits, + int requested_blocks), + + TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned int, type ) + __field( unsigned int, line_no ) + __field( int, buffer_credits ) + __field( int, requested_blocks) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->buffer_credits = buffer_credits; + __entry->requested_blocks = requested_blocks; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u " + "buffer_credits %d requested_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->buffer_credits, + __entry->requested_blocks) +); + +TRACE_EVENT(jbd2_handle_stats, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int interval, int sync, + int requested_blocks, int dirtied_blocks), + + TP_ARGS(dev, tid, type, line_no, interval, sync, + requested_blocks, dirtied_blocks), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned int, type ) + __field( unsigned int, line_no ) + __field( int, interval ) + __field( int, sync ) + __field( int, requested_blocks) + __field( int, dirtied_blocks ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->interval = interval; + __entry->sync = sync; + __entry->requested_blocks = requested_blocks; + __entry->dirtied_blocks 
= dirtied_blocks; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + "sync %d requested_blocks %d dirtied_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->interval, + __entry->sync, __entry->requested_blocks, + __entry->dirtied_blocks) +); + +TRACE_EVENT(jbd2_run_stats, + TP_PROTO(dev_t dev, unsigned long tid, + struct transaction_run_stats_s *stats), + + TP_ARGS(dev, tid, stats), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned long, wait ) + __field( unsigned long, request_delay ) + __field( unsigned long, running ) + __field( unsigned long, locked ) + __field( unsigned long, flushing ) + __field( unsigned long, logging ) + __field( __u32, handle_count ) + __field( __u32, blocks ) + __field( __u32, blocks_logged ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->wait = stats->rs_wait; + __entry->request_delay = stats->rs_request_delay; + __entry->running = stats->rs_running; + __entry->locked = stats->rs_locked; + __entry->flushing = stats->rs_flushing; + __entry->logging = stats->rs_logging; + __entry->handle_count = stats->rs_handle_count; + __entry->blocks = stats->rs_blocks; + __entry->blocks_logged = stats->rs_blocks_logged; + ), + + TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u " + "locked %u flushing %u logging %u handle_count %u " + "blocks %u blocks_logged %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + jiffies_to_msecs(__entry->wait), + jiffies_to_msecs(__entry->request_delay), + jiffies_to_msecs(__entry->running), + jiffies_to_msecs(__entry->locked), + jiffies_to_msecs(__entry->flushing), + jiffies_to_msecs(__entry->logging), + __entry->handle_count, __entry->blocks, + __entry->blocks_logged) +); + +TRACE_EVENT(jbd2_checkpoint_stats, + TP_PROTO(dev_t dev, unsigned long tid, + struct transaction_chp_stats_s *stats), + + TP_ARGS(dev, tid, stats), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned long, chp_time ) + __field( __u32, forced_to_close ) + __field( __u32, written ) + __field( __u32, dropped ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->chp_time = stats->cs_chp_time; + __entry->forced_to_close= stats->cs_forced_to_close; + __entry->written = stats->cs_written; + __entry->dropped = stats->cs_dropped; + ), + + TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " + "written %u dropped %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + jiffies_to_msecs(__entry->chp_time), + __entry->forced_to_close, __entry->written, __entry->dropped) +); + +TRACE_EVENT(jbd2_update_log_tail, + + TP_PROTO(journal_t *journal, tid_t first_tid, + unsigned long block_nr, unsigned long freed), + + TP_ARGS(journal, first_tid, block_nr, freed), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( tid_t, tail_sequence ) + __field( tid_t, first_tid ) + __field(unsigned long, block_nr ) + __field(unsigned long, freed ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->tail_sequence = journal->j_tail_sequence; + __entry->first_tid = first_tid; + __entry->block_nr = block_nr; + __entry->freed = freed; + ), + + TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tail_sequence, __entry->first_tid, + __entry->block_nr, __entry->freed) +); + +TRACE_EVENT(jbd2_write_superblock, + + TP_PROTO(journal_t *journal, 
int write_op), + + TP_ARGS(journal, write_op), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, write_op ) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->write_op = write_op; + ), + + TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev), + MINOR(__entry->dev), __entry->write_op) +); + +TRACE_EVENT(jbd2_lock_buffer_stall, + + TP_PROTO(dev_t dev, unsigned long stall_ms), + + TP_ARGS(dev, stall_ms), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field(unsigned long, stall_ms ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->stall_ms = stall_ms; + ), + + TP_printk("dev %d,%d stall_ms %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->stall_ms) +); + +#endif /* _TRACE_JBD2_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/kmem.h b/kernel/include/trace/events/kmem.h new file mode 100644 index 000000000..f7554fd7f --- /dev/null +++ b/kernel/include/trace/events/kmem.h @@ -0,0 +1,357 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kmem + +#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KMEM_H + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <trace/events/gfpflags.h> + +DECLARE_EVENT_CLASS(kmem_alloc, + + TP_PROTO(unsigned long call_site, + const void *ptr, + size_t bytes_req, + size_t bytes_alloc, + gfp_t gfp_flags), + + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), + + TP_STRUCT__entry( + __field( unsigned long, call_site ) + __field( const void *, ptr ) + __field( size_t, bytes_req ) + __field( size_t, bytes_alloc ) + __field( gfp_t, gfp_flags ) + ), + + TP_fast_assign( + __entry->call_site = call_site; + __entry->ptr = ptr; + __entry->bytes_req = bytes_req; + __entry->bytes_alloc = bytes_alloc; + __entry->gfp_flags = gfp_flags; + ), + + TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", + __entry->call_site, + __entry->ptr, + __entry->bytes_req, + __entry->bytes_alloc, + show_gfp_flags(__entry->gfp_flags)) +); + +DEFINE_EVENT(kmem_alloc, kmalloc, + + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), + + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) +); + +DEFINE_EVENT(kmem_alloc, kmem_cache_alloc, + + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), + + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) +); + +DECLARE_EVENT_CLASS(kmem_alloc_node, + + TP_PROTO(unsigned long call_site, + const void *ptr, + size_t bytes_req, + size_t bytes_alloc, + gfp_t gfp_flags, + int node), + + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), + + TP_STRUCT__entry( + __field( unsigned long, call_site ) + __field( const void *, ptr ) + __field( size_t, bytes_req ) + __field( size_t, bytes_alloc ) + __field( gfp_t, gfp_flags ) + __field( int, node ) + ), + + TP_fast_assign( + __entry->call_site = call_site; + __entry->ptr = ptr; + __entry->bytes_req = bytes_req; + __entry->bytes_alloc = bytes_alloc; + __entry->gfp_flags = gfp_flags; + __entry->node = node; + ), + + TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", + __entry->call_site, + __entry->ptr, + __entry->bytes_req, + __entry->bytes_alloc, + show_gfp_flags(__entry->gfp_flags), + __entry->node) +); + +DEFINE_EVENT(kmem_alloc_node, kmalloc_node, + + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, 
+	gfp_t gfp_flags, int node),
+
+	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+);
+
+DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
+
+	TP_PROTO(unsigned long call_site, const void *ptr,
+		 size_t bytes_req, size_t bytes_alloc,
+		 gfp_t gfp_flags, int node),
+
+	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+);
+
+DECLARE_EVENT_CLASS(kmem_free,
+
+	TP_PROTO(unsigned long call_site, const void *ptr),
+
+	TP_ARGS(call_site, ptr),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, call_site )
+		__field( const void *, ptr )
+	),
+
+	TP_fast_assign(
+		__entry->call_site = call_site;
+		__entry->ptr = ptr;
+	),
+
+	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
+);
+
+DEFINE_EVENT(kmem_free, kfree,
+
+	TP_PROTO(unsigned long call_site, const void *ptr),
+
+	TP_ARGS(call_site, ptr)
+);
+
+DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
+
+	TP_PROTO(unsigned long call_site, const void *ptr),
+
+	TP_ARGS(call_site, ptr),
+
+	/*
+	 * This trace can be potentially called from an offlined cpu.
+	 * Since trace points use RCU and RCU should not be used from
+	 * offline cpus, filter such calls out.
+	 * While this trace can be called from a preemptable section,
+	 * it has no impact on the condition since tasks can migrate
+	 * only from online cpus to other online cpus. Thus it's safe
+	 * to use raw_smp_processor_id.
+	 */
+	TP_CONDITION(cpu_online(raw_smp_processor_id()))
+);
+
+TRACE_EVENT_CONDITION(mm_page_free,
+
+	TP_PROTO(struct page *page, unsigned int order),
+
+	TP_ARGS(page, order),
+
+
+	/*
+	 * This trace can be potentially called from an offlined cpu.
+	 * Since trace points use RCU and RCU should not be used from
+	 * offline cpus, filter such calls out.
+	 * While this trace can be called from a preemptable section,
+	 * it has no impact on the condition since tasks can migrate
+	 * only from online cpus to other online cpus. Thus it's safe
+	 * to use raw_smp_processor_id.
+	 */
+	TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, pfn )
+		__field( unsigned int, order )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->order = order;
+	),
+
+	TP_printk("page=%p pfn=%lu order=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  __entry->order)
+);
+
+TRACE_EVENT(mm_page_free_batched,
+
+	TP_PROTO(struct page *page, int cold),
+
+	TP_ARGS(page, cold),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, pfn )
+		__field( int, cold )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->cold = cold;
+	),
+
+	TP_printk("page=%p pfn=%lu order=0 cold=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  __entry->cold)
+);
+
+TRACE_EVENT(mm_page_alloc,
+
+	TP_PROTO(struct page *page, unsigned int order,
+		 gfp_t gfp_flags, int migratetype),
+
+	TP_ARGS(page, order, gfp_flags, migratetype),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, pfn )
+		__field( unsigned int, order )
+		__field( gfp_t, gfp_flags )
+		__field( int, migratetype )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page ? page_to_pfn(page) : -1UL;
+		__entry->order = order;
+		__entry->gfp_flags = gfp_flags;
+		__entry->migratetype = migratetype;
+	),
+
+	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+		  __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+		  __entry->pfn != -1UL ? __entry->pfn : 0,
+		  __entry->order,
+		  __entry->migratetype,
+		  show_gfp_flags(__entry->gfp_flags))
+);
+
+DECLARE_EVENT_CLASS(mm_page,
+
+	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+	TP_ARGS(page, order, migratetype),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, pfn )
+		__field( unsigned int, order )
+		__field( int, migratetype )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page ? page_to_pfn(page) : -1UL;
+		__entry->order = order;
+		__entry->migratetype = migratetype;
+	),
+
+	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+		  __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+		  __entry->pfn != -1UL ? __entry->pfn : 0,
+		  __entry->order,
+		  __entry->migratetype,
+		  __entry->order == 0)
+);
+
+DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
+
+	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+	TP_ARGS(page, order, migratetype)
+);
+
+TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
+
+	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+	TP_ARGS(page, order, migratetype),
+
+	/*
+	 * This trace can be potentially called from an offlined cpu.
+	 * Since trace points use RCU and RCU should not be used from
+	 * offline cpus, filter such calls out.
+	 * While this trace can be called from a preemptable section,
+	 * it has no impact on the condition since tasks can migrate
+	 * only from online cpus to other online cpus. Thus it's safe
+	 * to use raw_smp_processor_id.
+	 */
+	TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, pfn )
+		__field( unsigned int, order )
+		__field( int, migratetype )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page ? page_to_pfn(page) : -1UL;
+		__entry->order = order;
+		__entry->migratetype = migratetype;
+	),
+
+	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+		  pfn_to_page(__entry->pfn), __entry->pfn,
+		  __entry->order, __entry->migratetype)
+);
+
+TRACE_EVENT(mm_page_alloc_extfrag,
+
+	TP_PROTO(struct page *page,
+		 int alloc_order, int fallback_order,
+		 int alloc_migratetype, int fallback_migratetype),
+
+	TP_ARGS(page,
+		alloc_order, fallback_order,
+		alloc_migratetype, fallback_migratetype),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, pfn )
+		__field( int, alloc_order )
+		__field( int, fallback_order )
+		__field( int, alloc_migratetype )
+		__field( int, fallback_migratetype )
+		__field( int, change_ownership )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->alloc_order = alloc_order;
+		__entry->fallback_order = fallback_order;
+		__entry->alloc_migratetype = alloc_migratetype;
+		__entry->fallback_migratetype = fallback_migratetype;
+		__entry->change_ownership = (alloc_migratetype ==
+					get_pageblock_migratetype(page));
+	),
+
+	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  __entry->alloc_order,
+		  __entry->fallback_order,
+		  pageblock_order,
+		  __entry->alloc_migratetype,
+		  __entry->fallback_migratetype,
+		  __entry->fallback_order < pageblock_order,
+		  __entry->change_ownership)
+);
+
+#endif /* _TRACE_KMEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/kvm.h b/kernel/include/trace/events/kvm.h
new file mode 100644
index 000000000..a44062da6
--- /dev/null
+++ b/kernel/include/trace/events/kvm.h
@@ -0,0 +1,364 @@
+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
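+/*
+ * (Editorial note) TRACE_HEADER_MULTI_READ lets define_trace.h include
+ * this header a second time with the event macros redefined, which is how
+ * one TRACE_EVENT() description yields both the tracepoint declarations
+ * and the ftrace event plumbing.
+ */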
+#define _TRACE_KVM_MAIN_H + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x } + +#define kvm_trace_exit_reason \ + ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \ + ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \ + ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \ + ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\ + ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \ + ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH) + +TRACE_EVENT(kvm_userspace_exit, + TP_PROTO(__u32 reason, int errno), + TP_ARGS(reason, errno), + + TP_STRUCT__entry( + __field( __u32, reason ) + __field( int, errno ) + ), + + TP_fast_assign( + __entry->reason = reason; + __entry->errno = errno; + ), + + TP_printk("reason %s (%d)", + __entry->errno < 0 ? + (__entry->errno == -EINTR ? "restart" : "error") : + __print_symbolic(__entry->reason, kvm_trace_exit_reason), + __entry->errno < 0 ? -__entry->errno : __entry->reason) +); + +TRACE_EVENT(kvm_vcpu_wakeup, + TP_PROTO(__u64 ns, bool waited), + TP_ARGS(ns, waited), + + TP_STRUCT__entry( + __field( __u64, ns ) + __field( bool, waited ) + ), + + TP_fast_assign( + __entry->ns = ns; + __entry->waited = waited; + ), + + TP_printk("%s time %lld ns", + __entry->waited ? "wait" : "poll", + __entry->ns) +); + +#if defined(CONFIG_HAVE_KVM_IRQFD) +TRACE_EVENT(kvm_set_irq, + TP_PROTO(unsigned int gsi, int level, int irq_source_id), + TP_ARGS(gsi, level, irq_source_id), + + TP_STRUCT__entry( + __field( unsigned int, gsi ) + __field( int, level ) + __field( int, irq_source_id ) + ), + + TP_fast_assign( + __entry->gsi = gsi; + __entry->level = level; + __entry->irq_source_id = irq_source_id; + ), + + TP_printk("gsi %u level %d source %d", + __entry->gsi, __entry->level, __entry->irq_source_id) +); +#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */ + +#if defined(__KVM_HAVE_IOAPIC) +#define kvm_deliver_mode \ + {0x0, "Fixed"}, \ + {0x1, "LowPrio"}, \ + {0x2, "SMI"}, \ + {0x3, "Res3"}, \ + {0x4, "NMI"}, \ + {0x5, "INIT"}, \ + {0x6, "SIPI"}, \ + {0x7, "ExtINT"} + +TRACE_EVENT(kvm_ioapic_set_irq, + TP_PROTO(__u64 e, int pin, bool coalesced), + TP_ARGS(e, pin, coalesced), + + TP_STRUCT__entry( + __field( __u64, e ) + __field( int, pin ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->e = e; + __entry->pin = pin; + __entry->coalesced = coalesced; + ), + + TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s", + __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e, + __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode), + (__entry->e & (1<<11)) ? "logical" : "physical", + (__entry->e & (1<<15)) ? "level" : "edge", + (__entry->e & (1<<16)) ? "|masked" : "", + __entry->coalesced ? " (coalesced)" : "") +); + +TRACE_EVENT(kvm_ioapic_delayed_eoi_inj, + TP_PROTO(__u64 e), + TP_ARGS(e), + + TP_STRUCT__entry( + __field( __u64, e ) + ), + + TP_fast_assign( + __entry->e = e; + ), + + TP_printk("dst %x vec=%u (%s|%s|%s%s)", + (u8)(__entry->e >> 56), (u8)__entry->e, + __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode), + (__entry->e & (1<<11)) ? "logical" : "physical", + (__entry->e & (1<<15)) ? "level" : "edge", + (__entry->e & (1<<16)) ? 
"|masked" : "") +); + +TRACE_EVENT(kvm_msi_set_irq, + TP_PROTO(__u64 address, __u64 data), + TP_ARGS(address, data), + + TP_STRUCT__entry( + __field( __u64, address ) + __field( __u64, data ) + ), + + TP_fast_assign( + __entry->address = address; + __entry->data = data; + ), + + TP_printk("dst %u vec %x (%s|%s|%s%s)", + (u8)(__entry->address >> 12), (u8)__entry->data, + __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode), + (__entry->address & (1<<2)) ? "logical" : "physical", + (__entry->data & (1<<15)) ? "level" : "edge", + (__entry->address & (1<<3)) ? "|rh" : "") +); + +#define kvm_irqchips \ + {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \ + {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \ + {KVM_IRQCHIP_IOAPIC, "IOAPIC"} + +#endif /* defined(__KVM_HAVE_IOAPIC) */ + +#if defined(CONFIG_HAVE_KVM_IRQFD) + +#ifdef kvm_irqchips +#define kvm_ack_irq_string "irqchip %s pin %u" +#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin +#else +#define kvm_ack_irq_string "irqchip %d pin %u" +#define kvm_ack_irq_parm __entry->irqchip, __entry->pin +#endif + +TRACE_EVENT(kvm_ack_irq, + TP_PROTO(unsigned int irqchip, unsigned int pin), + TP_ARGS(irqchip, pin), + + TP_STRUCT__entry( + __field( unsigned int, irqchip ) + __field( unsigned int, pin ) + ), + + TP_fast_assign( + __entry->irqchip = irqchip; + __entry->pin = pin; + ), + + TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm) +); + +#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */ + + + +#define KVM_TRACE_MMIO_READ_UNSATISFIED 0 +#define KVM_TRACE_MMIO_READ 1 +#define KVM_TRACE_MMIO_WRITE 2 + +#define kvm_trace_symbol_mmio \ + { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \ + { KVM_TRACE_MMIO_READ, "read" }, \ + { KVM_TRACE_MMIO_WRITE, "write" } + +TRACE_EVENT(kvm_mmio, + TP_PROTO(int type, int len, u64 gpa, u64 val), + TP_ARGS(type, len, gpa, val), + + TP_STRUCT__entry( + __field( u32, type ) + __field( u32, len ) + __field( u64, gpa ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->len = len; + __entry->gpa = gpa; + __entry->val = val; + ), + + TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", + __print_symbolic(__entry->type, kvm_trace_symbol_mmio), + __entry->len, __entry->gpa, __entry->val) +); + +#define kvm_fpu_load_symbol \ + {0, "unload"}, \ + {1, "load"} + +TRACE_EVENT(kvm_fpu, + TP_PROTO(int load), + TP_ARGS(load), + + TP_STRUCT__entry( + __field( u32, load ) + ), + + TP_fast_assign( + __entry->load = load; + ), + + TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol)) +); + +TRACE_EVENT(kvm_age_page, + TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref), + TP_ARGS(gfn, level, slot, ref), + + TP_STRUCT__entry( + __field( u64, hva ) + __field( u64, gfn ) + __field( u8, level ) + __field( u8, referenced ) + ), + + TP_fast_assign( + __entry->gfn = gfn; + __entry->level = level; + __entry->hva = ((gfn - slot->base_gfn) << + PAGE_SHIFT) + slot->userspace_addr; + __entry->referenced = ref; + ), + + TP_printk("hva %llx gfn %llx level %u %s", + __entry->hva, __entry->gfn, __entry->level, + __entry->referenced ? 
"YOUNG" : "OLD") +); + +#ifdef CONFIG_KVM_ASYNC_PF +DECLARE_EVENT_CLASS(kvm_async_get_page_class, + + TP_PROTO(u64 gva, u64 gfn), + + TP_ARGS(gva, gfn), + + TP_STRUCT__entry( + __field(__u64, gva) + __field(u64, gfn) + ), + + TP_fast_assign( + __entry->gva = gva; + __entry->gfn = gfn; + ), + + TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn) +); + +DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page, + + TP_PROTO(u64 gva, u64 gfn), + + TP_ARGS(gva, gfn) +); + +DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault, + + TP_PROTO(u64 gva, u64 gfn), + + TP_ARGS(gva, gfn) +); + +DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready, + + TP_PROTO(u64 token, u64 gva), + + TP_ARGS(token, gva), + + TP_STRUCT__entry( + __field(__u64, token) + __field(__u64, gva) + ), + + TP_fast_assign( + __entry->token = token; + __entry->gva = gva; + ), + + TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva) + +); + +DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present, + + TP_PROTO(u64 token, u64 gva), + + TP_ARGS(token, gva) +); + +DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready, + + TP_PROTO(u64 token, u64 gva), + + TP_ARGS(token, gva) +); + +TRACE_EVENT( + kvm_async_pf_completed, + TP_PROTO(unsigned long address, u64 gva), + TP_ARGS(address, gva), + + TP_STRUCT__entry( + __field(unsigned long, address) + __field(u64, gva) + ), + + TP_fast_assign( + __entry->address = address; + __entry->gva = gva; + ), + + TP_printk("gva %#llx address %#lx", __entry->gva, + __entry->address) +); + +#endif + +#endif /* _TRACE_KVM_MAIN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/latency_hist.h b/kernel/include/trace/events/latency_hist.h new file mode 100644 index 000000000..d3f2fbd56 --- /dev/null +++ b/kernel/include/trace/events/latency_hist.h @@ -0,0 +1,29 @@ +#ifndef _LATENCY_HIST_H +#define _LATENCY_HIST_H + +enum hist_action { + IRQS_ON, + PREEMPT_ON, + TRACE_STOP, + IRQS_OFF, + PREEMPT_OFF, + TRACE_START, +}; + +static char *actions[] = { + "IRQS_ON", + "PREEMPT_ON", + "TRACE_STOP", + "IRQS_OFF", + "PREEMPT_OFF", + "TRACE_START", +}; + +static inline char *getaction(int action) +{ + if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) + return actions[action]; + return "unknown"; +} + +#endif /* _LATENCY_HIST_H */ diff --git a/kernel/include/trace/events/libata.h b/kernel/include/trace/events/libata.h new file mode 100644 index 000000000..8b0fbd930 --- /dev/null +++ b/kernel/include/trace/events/libata.h @@ -0,0 +1,325 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM libata + +#if !defined(_TRACE_LIBATA_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LIBATA_H + +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +#define ata_opcode_name(opcode) { opcode, #opcode } +#define show_opcode_name(val) \ + __print_symbolic(val, \ + ata_opcode_name(ATA_CMD_DEV_RESET), \ + ata_opcode_name(ATA_CMD_CHK_POWER), \ + ata_opcode_name(ATA_CMD_STANDBY), \ + ata_opcode_name(ATA_CMD_IDLE), \ + ata_opcode_name(ATA_CMD_EDD), \ + ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO), \ + ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO_DMA), \ + ata_opcode_name(ATA_CMD_NOP), \ + ata_opcode_name(ATA_CMD_FLUSH), \ + ata_opcode_name(ATA_CMD_FLUSH_EXT), \ + ata_opcode_name(ATA_CMD_ID_ATA), \ + ata_opcode_name(ATA_CMD_ID_ATAPI), \ + ata_opcode_name(ATA_CMD_SERVICE), \ + ata_opcode_name(ATA_CMD_READ), \ + ata_opcode_name(ATA_CMD_READ_EXT), \ + 
ata_opcode_name(ATA_CMD_READ_QUEUED), \ + ata_opcode_name(ATA_CMD_READ_STREAM_EXT), \ + ata_opcode_name(ATA_CMD_READ_STREAM_DMA_EXT), \ + ata_opcode_name(ATA_CMD_WRITE), \ + ata_opcode_name(ATA_CMD_WRITE_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_QUEUED), \ + ata_opcode_name(ATA_CMD_WRITE_STREAM_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_STREAM_DMA_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_FUA_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \ + ata_opcode_name(ATA_CMD_FPDMA_READ), \ + ata_opcode_name(ATA_CMD_FPDMA_WRITE), \ + ata_opcode_name(ATA_CMD_FPDMA_SEND), \ + ata_opcode_name(ATA_CMD_FPDMA_RECV), \ + ata_opcode_name(ATA_CMD_PIO_READ), \ + ata_opcode_name(ATA_CMD_PIO_READ_EXT), \ + ata_opcode_name(ATA_CMD_PIO_WRITE), \ + ata_opcode_name(ATA_CMD_PIO_WRITE_EXT), \ + ata_opcode_name(ATA_CMD_READ_MULTI), \ + ata_opcode_name(ATA_CMD_READ_MULTI_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_MULTI), \ + ata_opcode_name(ATA_CMD_WRITE_MULTI_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_MULTI_FUA_EXT), \ + ata_opcode_name(ATA_CMD_SET_FEATURES), \ + ata_opcode_name(ATA_CMD_SET_MULTI), \ + ata_opcode_name(ATA_CMD_PACKET), \ + ata_opcode_name(ATA_CMD_VERIFY), \ + ata_opcode_name(ATA_CMD_VERIFY_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_UNCORR_EXT), \ + ata_opcode_name(ATA_CMD_STANDBYNOW1), \ + ata_opcode_name(ATA_CMD_IDLEIMMEDIATE), \ + ata_opcode_name(ATA_CMD_SLEEP), \ + ata_opcode_name(ATA_CMD_INIT_DEV_PARAMS), \ + ata_opcode_name(ATA_CMD_READ_NATIVE_MAX), \ + ata_opcode_name(ATA_CMD_READ_NATIVE_MAX_EXT), \ + ata_opcode_name(ATA_CMD_SET_MAX), \ + ata_opcode_name(ATA_CMD_SET_MAX_EXT), \ + ata_opcode_name(ATA_CMD_READ_LOG_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_LOG_EXT), \ + ata_opcode_name(ATA_CMD_READ_LOG_DMA_EXT), \ + ata_opcode_name(ATA_CMD_WRITE_LOG_DMA_EXT), \ + ata_opcode_name(ATA_CMD_TRUSTED_NONDATA), \ + ata_opcode_name(ATA_CMD_TRUSTED_RCV), \ + ata_opcode_name(ATA_CMD_TRUSTED_RCV_DMA), \ + ata_opcode_name(ATA_CMD_TRUSTED_SND), \ + ata_opcode_name(ATA_CMD_TRUSTED_SND_DMA), \ + ata_opcode_name(ATA_CMD_PMP_READ), \ + ata_opcode_name(ATA_CMD_PMP_READ_DMA), \ + ata_opcode_name(ATA_CMD_PMP_WRITE), \ + ata_opcode_name(ATA_CMD_PMP_WRITE_DMA), \ + ata_opcode_name(ATA_CMD_CONF_OVERLAY), \ + ata_opcode_name(ATA_CMD_SEC_SET_PASS), \ + ata_opcode_name(ATA_CMD_SEC_UNLOCK), \ + ata_opcode_name(ATA_CMD_SEC_ERASE_PREP), \ + ata_opcode_name(ATA_CMD_SEC_ERASE_UNIT), \ + ata_opcode_name(ATA_CMD_SEC_FREEZE_LOCK), \ + ata_opcode_name(ATA_CMD_SEC_DISABLE_PASS), \ + ata_opcode_name(ATA_CMD_CONFIG_STREAM), \ + ata_opcode_name(ATA_CMD_SMART), \ + ata_opcode_name(ATA_CMD_MEDIA_LOCK), \ + ata_opcode_name(ATA_CMD_MEDIA_UNLOCK), \ + ata_opcode_name(ATA_CMD_DSM), \ + ata_opcode_name(ATA_CMD_CHK_MED_CRD_TYP), \ + ata_opcode_name(ATA_CMD_CFA_REQ_EXT_ERR), \ + ata_opcode_name(ATA_CMD_CFA_WRITE_NE), \ + ata_opcode_name(ATA_CMD_CFA_TRANS_SECT), \ + ata_opcode_name(ATA_CMD_CFA_ERASE), \ + ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE), \ + ata_opcode_name(ATA_CMD_REQ_SENSE_DATA), \ + ata_opcode_name(ATA_CMD_SANITIZE_DEVICE), \ + ata_opcode_name(ATA_CMD_RESTORE), \ + ata_opcode_name(ATA_CMD_READ_LONG), \ + ata_opcode_name(ATA_CMD_READ_LONG_ONCE), \ + ata_opcode_name(ATA_CMD_WRITE_LONG), \ + ata_opcode_name(ATA_CMD_WRITE_LONG_ONCE)) + +#define ata_error_name(result) { result, #result } +#define show_error_name(val) \ + __print_symbolic(val, \ + ata_error_name(ATA_ICRC), \ + ata_error_name(ATA_UNC), \ + ata_error_name(ATA_MC), \ + ata_error_name(ATA_IDNF), \ + ata_error_name(ATA_MCR), \ + ata_error_name(ATA_ABORTED), \ + 
ata_error_name(ATA_TRK0NF), \ + ata_error_name(ATA_AMNF)) + +#define ata_protocol_name(proto) { proto, #proto } +#define show_protocol_name(val) \ + __print_symbolic(val, \ + ata_protocol_name(ATA_PROT_UNKNOWN), \ + ata_protocol_name(ATA_PROT_NODATA), \ + ata_protocol_name(ATA_PROT_PIO), \ + ata_protocol_name(ATA_PROT_DMA), \ + ata_protocol_name(ATA_PROT_NCQ), \ + ata_protocol_name(ATAPI_PROT_NODATA), \ + ata_protocol_name(ATAPI_PROT_PIO), \ + ata_protocol_name(ATAPI_PROT_DMA)) + +const char *libata_trace_parse_status(struct trace_seq*, unsigned char); +#define __parse_status(s) libata_trace_parse_status(p, s) + +const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int); +#define __parse_eh_action(a) libata_trace_parse_eh_action(p, a) + +const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int); +#define __parse_eh_err_mask(m) libata_trace_parse_eh_err_mask(p, m) + +const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int); +#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f) + +TRACE_EVENT(ata_qc_issue, + + TP_PROTO(struct ata_queued_cmd *qc), + + TP_ARGS(qc), + + TP_STRUCT__entry( + __field( unsigned int, ata_port ) + __field( unsigned int, ata_dev ) + __field( unsigned int, tag ) + __field( unsigned char, cmd ) + __field( unsigned char, dev ) + __field( unsigned char, lbal ) + __field( unsigned char, lbam ) + __field( unsigned char, lbah ) + __field( unsigned char, nsect ) + __field( unsigned char, feature ) + __field( unsigned char, hob_lbal ) + __field( unsigned char, hob_lbam ) + __field( unsigned char, hob_lbah ) + __field( unsigned char, hob_nsect ) + __field( unsigned char, hob_feature ) + __field( unsigned char, ctl ) + __field( unsigned char, proto ) + __field( unsigned long, flags ) + ), + + TP_fast_assign( + __entry->ata_port = qc->ap->print_id; + __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno; + __entry->tag = qc->tag; + __entry->proto = qc->tf.protocol; + __entry->cmd = qc->tf.command; + __entry->dev = qc->tf.device; + __entry->lbal = qc->tf.lbal; + __entry->lbam = qc->tf.lbam; + __entry->lbah = qc->tf.lbah; + __entry->hob_lbal = qc->tf.hob_lbal; + __entry->hob_lbam = qc->tf.hob_lbam; + __entry->hob_lbah = qc->tf.hob_lbah; + __entry->feature = qc->tf.feature; + __entry->hob_feature = qc->tf.hob_feature; + __entry->nsect = qc->tf.nsect; + __entry->hob_nsect = qc->tf.hob_nsect; + ), + + TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \ + " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)", + __entry->ata_port, __entry->ata_dev, __entry->tag, + show_protocol_name(__entry->proto), + show_opcode_name(__entry->cmd), + __entry->cmd, __entry->feature, __entry->nsect, + __entry->lbal, __entry->lbam, __entry->lbah, + __entry->hob_feature, __entry->hob_nsect, + __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah, + __entry->dev) +); + +DECLARE_EVENT_CLASS(ata_qc_complete_template, + + TP_PROTO(struct ata_queued_cmd *qc), + + TP_ARGS(qc), + + TP_STRUCT__entry( + __field( unsigned int, ata_port ) + __field( unsigned int, ata_dev ) + __field( unsigned int, tag ) + __field( unsigned char, status ) + __field( unsigned char, dev ) + __field( unsigned char, lbal ) + __field( unsigned char, lbam ) + __field( unsigned char, lbah ) + __field( unsigned char, nsect ) + __field( unsigned char, error ) + __field( unsigned char, hob_lbal ) + __field( unsigned char, hob_lbam ) + __field( unsigned char, hob_lbah ) + __field( unsigned char, hob_nsect ) + __field( unsigned char, hob_feature ) + __field( 
unsigned char, ctl ) + __field( unsigned long, flags ) + ), + + TP_fast_assign( + __entry->ata_port = qc->ap->print_id; + __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno; + __entry->tag = qc->tag; + __entry->status = qc->result_tf.command; + __entry->dev = qc->result_tf.device; + __entry->lbal = qc->result_tf.lbal; + __entry->lbam = qc->result_tf.lbam; + __entry->lbah = qc->result_tf.lbah; + __entry->hob_lbal = qc->result_tf.hob_lbal; + __entry->hob_lbam = qc->result_tf.hob_lbam; + __entry->hob_lbah = qc->result_tf.hob_lbah; + __entry->error = qc->result_tf.feature; + __entry->hob_feature = qc->result_tf.hob_feature; + __entry->nsect = qc->result_tf.nsect; + __entry->hob_nsect = qc->result_tf.hob_nsect; + __entry->flags = qc->flags; + ), + + TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \ + " res=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)", + __entry->ata_port, __entry->ata_dev, __entry->tag, + __parse_qc_flags(__entry->flags), + __parse_status(__entry->status), + __entry->status, __entry->error, __entry->nsect, + __entry->lbal, __entry->lbam, __entry->lbah, + __entry->hob_feature, __entry->hob_nsect, + __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah, + __entry->dev) +); + +DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_internal, + TP_PROTO(struct ata_queued_cmd *qc), + TP_ARGS(qc)); + +DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_failed, + TP_PROTO(struct ata_queued_cmd *qc), + TP_ARGS(qc)); + +DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done, + TP_PROTO(struct ata_queued_cmd *qc), + TP_ARGS(qc)); + +TRACE_EVENT(ata_eh_link_autopsy, + + TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask), + + TP_ARGS(dev, eh_action, eh_err_mask), + + TP_STRUCT__entry( + __field( unsigned int, ata_port ) + __field( unsigned int, ata_dev ) + __field( unsigned int, eh_action ) + __field( unsigned int, eh_err_mask) + ), + + TP_fast_assign( + __entry->ata_port = dev->link->ap->print_id; + __entry->ata_dev = dev->link->pmp + dev->devno; + __entry->eh_action = eh_action; + __entry->eh_err_mask = eh_err_mask; + ), + + TP_printk("ata_port=%u ata_dev=%u eh_action=%s err_mask=%s", + __entry->ata_port, __entry->ata_dev, + __parse_eh_action(__entry->eh_action), + __parse_eh_err_mask(__entry->eh_err_mask)) +); + +TRACE_EVENT(ata_eh_link_autopsy_qc, + + TP_PROTO(struct ata_queued_cmd *qc), + + TP_ARGS(qc), + + TP_STRUCT__entry( + __field( unsigned int, ata_port ) + __field( unsigned int, ata_dev ) + __field( unsigned int, tag ) + __field( unsigned int, qc_flags ) + __field( unsigned int, eh_err_mask) + ), + + TP_fast_assign( + __entry->ata_port = qc->ap->print_id; + __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno; + __entry->tag = qc->tag; + __entry->qc_flags = qc->flags; + __entry->eh_err_mask = qc->err_mask; + ), + + TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s err_mask=%s", + __entry->ata_port, __entry->ata_dev, __entry->tag, + __parse_qc_flags(__entry->qc_flags), + __parse_eh_err_mask(__entry->eh_err_mask)) +); + +#endif /* _TRACE_LIBATA_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/lock.h b/kernel/include/trace/events/lock.h new file mode 100644 index 000000000..2821b86de --- /dev/null +++ b/kernel/include/trace/events/lock.h @@ -0,0 +1,86 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM lock + +#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LOCK_H + +#include <linux/lockdep.h> +#include <linux/tracepoint.h> + +#ifdef
CONFIG_LOCKDEP + +TRACE_EVENT(lock_acquire, + + TP_PROTO(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lockdep_map *next_lock, unsigned long ip), + + TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), + + TP_STRUCT__entry( + __field(unsigned int, flags) + __string(name, lock->name) + __field(void *, lockdep_addr) + ), + + TP_fast_assign( + __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); + __assign_str(name, lock->name); + __entry->lockdep_addr = lock; + ), + + TP_printk("%p %s%s%s", __entry->lockdep_addr, + (__entry->flags & 1) ? "try " : "", + (__entry->flags & 2) ? "read " : "", + __get_str(name)) +); + +DECLARE_EVENT_CLASS(lock, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip), + + TP_STRUCT__entry( + __string( name, lock->name ) + __field( void *, lockdep_addr ) + ), + + TP_fast_assign( + __assign_str(name, lock->name); + __entry->lockdep_addr = lock; + ), + + TP_printk("%p %s", __entry->lockdep_addr, __get_str(name)) +); + +DEFINE_EVENT(lock, lock_release, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +#ifdef CONFIG_LOCK_STAT + +DEFINE_EVENT(lock, lock_contended, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +DEFINE_EVENT(lock, lock_acquired, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +#endif +#endif + +#endif /* _TRACE_LOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/mce.h b/kernel/include/trace/events/mce.h new file mode 100644 index 000000000..4cbbcef6b --- /dev/null +++ b/kernel/include/trace/events/mce.h @@ -0,0 +1,69 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mce + +#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MCE_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> +#include <asm/mce.h> + +TRACE_EVENT(mce_record, + + TP_PROTO(struct mce *m), + + TP_ARGS(m), + + TP_STRUCT__entry( + __field( u64, mcgcap ) + __field( u64, mcgstatus ) + __field( u64, status ) + __field( u64, addr ) + __field( u64, misc ) + __field( u64, ip ) + __field( u64, tsc ) + __field( u64, walltime ) + __field( u32, cpu ) + __field( u32, cpuid ) + __field( u32, apicid ) + __field( u32, socketid ) + __field( u8, cs ) + __field( u8, bank ) + __field( u8, cpuvendor ) + ), + + TP_fast_assign( + __entry->mcgcap = m->mcgcap; + __entry->mcgstatus = m->mcgstatus; + __entry->status = m->status; + __entry->addr = m->addr; + __entry->misc = m->misc; + __entry->ip = m->ip; + __entry->tsc = m->tsc; + __entry->walltime = m->time; + __entry->cpu = m->extcpu; + __entry->cpuid = m->cpuid; + __entry->apicid = m->apicid; + __entry->socketid = m->socketid; + __entry->cs = m->cs; + __entry->bank = m->bank; + __entry->cpuvendor = m->cpuvendor; + ), + + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", + __entry->cpu, + __entry->mcgcap, __entry->mcgstatus, + __entry->bank, __entry->status, + __entry->addr, __entry->misc, + __entry->cs, __entry->ip, + __entry->tsc, + __entry->cpuvendor, __entry->cpuid, + __entry->walltime, + __entry->socketid, + __entry->apicid) +); + +#endif /* _TRACE_MCE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/migrate.h b/kernel/include/trace/events/migrate.h new file mode 100644 
index 000000000..539b25a76 --- /dev/null +++ b/kernel/include/trace/events/migrate.h @@ -0,0 +1,102 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM migrate + +#if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MIGRATE_H + +#include <linux/tracepoint.h> + +#define MIGRATE_MODE \ + EM( MIGRATE_ASYNC, "MIGRATE_ASYNC") \ + EM( MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT") \ + EMe(MIGRATE_SYNC, "MIGRATE_SYNC") + + +#define MIGRATE_REASON \ + EM( MR_COMPACTION, "compaction") \ + EM( MR_MEMORY_FAILURE, "memory_failure") \ + EM( MR_MEMORY_HOTPLUG, "memory_hotplug") \ + EM( MR_SYSCALL, "syscall_or_cpuset") \ + EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \ + EM( MR_NUMA_MISPLACED, "numa_misplaced") \ + EMe(MR_CMA, "cma") + +/* + * First define the enums in the above macros to be exported to userspace + * via TRACE_DEFINE_ENUM(). + */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +MIGRATE_MODE +MIGRATE_REASON + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +TRACE_EVENT(mm_migrate_pages, + + TP_PROTO(unsigned long succeeded, unsigned long failed, + enum migrate_mode mode, int reason), + + TP_ARGS(succeeded, failed, mode, reason), + + TP_STRUCT__entry( + __field( unsigned long, succeeded) + __field( unsigned long, failed) + __field( enum migrate_mode, mode) + __field( int, reason) + ), + + TP_fast_assign( + __entry->succeeded = succeeded; + __entry->failed = failed; + __entry->mode = mode; + __entry->reason = reason; + ), + + TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s", + __entry->succeeded, + __entry->failed, + __print_symbolic(__entry->mode, MIGRATE_MODE), + __print_symbolic(__entry->reason, MIGRATE_REASON)) +); + +TRACE_EVENT(mm_numa_migrate_ratelimit, + + TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages), + + TP_ARGS(p, dst_nid, nr_pages), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN) + __field( pid_t, pid) + __field( int, dst_nid) + __field( unsigned long, nr_pages) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->dst_nid = dst_nid; + __entry->nr_pages = nr_pages; + ), + + TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu", + __entry->comm, + __entry->pid, + __entry->dst_nid, + __entry->nr_pages) +); +#endif /* _TRACE_MIGRATE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/module.h b/kernel/include/trace/events/module.h new file mode 100644 index 000000000..28c45997e --- /dev/null +++ b/kernel/include/trace/events/module.h @@ -0,0 +1,133 @@ +/* + * Because linux/module.h has tracepoints in the header, and ftrace.h + * used to include this file, define_trace.h includes linux/module.h + * But we do not want the module.h to override the TRACE_SYSTEM macro + * variable that define_trace.h is processing, so we only set it + * when module events are being processed, which would happen when + * CREATE_TRACE_POINTS is defined. 
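+ * + * Illustrative usage (a sketch, not part of this header): exactly one .c + * file instantiates these tracepoints by defining CREATE_TRACE_POINTS + * before the include, e.g. + * + *	#define CREATE_TRACE_POINTS + *	#include <trace/events/module.h> + * + * while every other user includes the header plainly and calls the + * generated hooks, e.g. trace_module_load(mod).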
+ */ +#ifdef CREATE_TRACE_POINTS +#undef TRACE_SYSTEM +#define TRACE_SYSTEM module +#endif + +#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MODULE_H + +#include <linux/tracepoint.h> + +#ifdef CONFIG_MODULES + +struct module; + +#define show_module_flags(flags) __print_flags(flags, "", \ + { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \ + { (1UL << TAINT_OOT_MODULE), "O" }, \ + { (1UL << TAINT_FORCED_MODULE), "F" }, \ + { (1UL << TAINT_CRAP), "C" }, \ + { (1UL << TAINT_UNSIGNED_MODULE), "E" }) + +TRACE_EVENT(module_load, + + TP_PROTO(struct module *mod), + + TP_ARGS(mod), + + TP_STRUCT__entry( + __field( unsigned int, taints ) + __string( name, mod->name ) + ), + + TP_fast_assign( + __entry->taints = mod->taints; + __assign_str(name, mod->name); + ), + + TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints)) +); + +TRACE_EVENT(module_free, + + TP_PROTO(struct module *mod), + + TP_ARGS(mod), + + TP_STRUCT__entry( + __string( name, mod->name ) + ), + + TP_fast_assign( + __assign_str(name, mod->name); + ), + + TP_printk("%s", __get_str(name)) +); + +#ifdef CONFIG_MODULE_UNLOAD +/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */ + +DECLARE_EVENT_CLASS(module_refcnt, + + TP_PROTO(struct module *mod, unsigned long ip), + + TP_ARGS(mod, ip), + + TP_STRUCT__entry( + __field( unsigned long, ip ) + __field( int, refcnt ) + __string( name, mod->name ) + ), + + TP_fast_assign( + __entry->ip = ip; + __entry->refcnt = atomic_read(&mod->refcnt); + __assign_str(name, mod->name); + ), + + TP_printk("%s call_site=%ps refcnt=%d", + __get_str(name), (void *)__entry->ip, __entry->refcnt) +); + +DEFINE_EVENT(module_refcnt, module_get, + + TP_PROTO(struct module *mod, unsigned long ip), + + TP_ARGS(mod, ip) +); + +DEFINE_EVENT(module_refcnt, module_put, + + TP_PROTO(struct module *mod, unsigned long ip), + + TP_ARGS(mod, ip) +); +#endif /* CONFIG_MODULE_UNLOAD */ + +TRACE_EVENT(module_request, + + TP_PROTO(char *name, bool wait, unsigned long ip), + + TP_ARGS(name, wait, ip), + + TP_STRUCT__entry( + __field( unsigned long, ip ) + __field( bool, wait ) + __string( name, name ) + ), + + TP_fast_assign( + __entry->ip = ip; + __entry->wait = wait; + __assign_str(name, name); + ), + + TP_printk("%s wait=%d call_site=%ps", + __get_str(name), (int)__entry->wait, (void *)__entry->ip) +); + +#endif /* CONFIG_MODULES */ + +#endif /* _TRACE_MODULE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/napi.h b/kernel/include/trace/events/napi.h new file mode 100644 index 000000000..8fe1e93f5 --- /dev/null +++ b/kernel/include/trace/events/napi.h @@ -0,0 +1,38 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM napi + +#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NAPI_H_ + +#include <linux/netdevice.h> +#include <linux/tracepoint.h> +#include <linux/ftrace.h> + +#define NO_DEV "(no_device)" + +TRACE_EVENT(napi_poll, + + TP_PROTO(struct napi_struct *napi), + + TP_ARGS(napi), + + TP_STRUCT__entry( + __field( struct napi_struct *, napi) + __string( dev_name, napi->dev ? napi->dev->name : NO_DEV) + ), + + TP_fast_assign( + __entry->napi = napi; + __assign_str(dev_name, napi->dev ? 
napi->dev->name : NO_DEV); + ), + + TP_printk("napi poll on napi struct %p for device %s", + __entry->napi, __get_str(dev_name)) +); + +#undef NO_DEV + +#endif /* _TRACE_NAPI_H_ */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/net.h b/kernel/include/trace/events/net.h new file mode 100644 index 000000000..49cc7c3de --- /dev/null +++ b/kernel/include/trace/events/net.h @@ -0,0 +1,242 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM net + +#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NET_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(net_dev_start_xmit, + + TP_PROTO(const struct sk_buff *skb, const struct net_device *dev), + + TP_ARGS(skb, dev), + + TP_STRUCT__entry( + __string( name, dev->name ) + __field( u16, queue_mapping ) + __field( const void *, skbaddr ) + __field( bool, vlan_tagged ) + __field( u16, vlan_proto ) + __field( u16, vlan_tci ) + __field( u16, protocol ) + __field( u8, ip_summed ) + __field( unsigned int, len ) + __field( unsigned int, data_len ) + __field( int, network_offset ) + __field( bool, transport_offset_valid) + __field( int, transport_offset) + __field( u8, tx_flags ) + __field( u16, gso_size ) + __field( u16, gso_segs ) + __field( u16, gso_type ) + ), + + TP_fast_assign( + __assign_str(name, dev->name); + __entry->queue_mapping = skb->queue_mapping; + __entry->skbaddr = skb; + __entry->vlan_tagged = skb_vlan_tag_present(skb); + __entry->vlan_proto = ntohs(skb->vlan_proto); + __entry->vlan_tci = skb_vlan_tag_get(skb); + __entry->protocol = ntohs(skb->protocol); + __entry->ip_summed = skb->ip_summed; + __entry->len = skb->len; + __entry->data_len = skb->data_len; + __entry->network_offset = skb_network_offset(skb); + __entry->transport_offset_valid = + skb_transport_header_was_set(skb); + __entry->transport_offset = skb_transport_offset(skb); + __entry->tx_flags = skb_shinfo(skb)->tx_flags; + __entry->gso_size = skb_shinfo(skb)->gso_size; + __entry->gso_segs = skb_shinfo(skb)->gso_segs; + __entry->gso_type = skb_shinfo(skb)->gso_type; + ), + + TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x", + __get_str(name), __entry->queue_mapping, __entry->skbaddr, + __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci, + __entry->protocol, __entry->ip_summed, __entry->len, + __entry->data_len, + __entry->network_offset, __entry->transport_offset_valid, + __entry->transport_offset, __entry->tx_flags, + __entry->gso_size, __entry->gso_segs, __entry->gso_type) +); + +TRACE_EVENT(net_dev_xmit, + + TP_PROTO(struct sk_buff *skb, + int rc, + struct net_device *dev, + unsigned int skb_len), + + TP_ARGS(skb, rc, dev, skb_len), + + TP_STRUCT__entry( + __field( void *, skbaddr ) + __field( unsigned int, len ) + __field( int, rc ) + __string( name, dev->name ) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->len = skb_len; + __entry->rc = rc; + __assign_str(name, dev->name); + ), + + TP_printk("dev=%s skbaddr=%p len=%u rc=%d", + __get_str(name), __entry->skbaddr, __entry->len, __entry->rc) +); + +DECLARE_EVENT_CLASS(net_dev_template, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __field( void *, skbaddr ) + __field( 
unsigned int, len ) + __string( name, skb->dev->name ) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->len = skb->len; + __assign_str(name, skb->dev->name); + ), + + TP_printk("dev=%s skbaddr=%p len=%u", + __get_str(name), __entry->skbaddr, __entry->len) +) + +DEFINE_EVENT(net_dev_template, net_dev_queue, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_template, netif_receive_skb, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_template, netif_rx, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb) +); + +DECLARE_EVENT_CLASS(net_dev_rx_verbose_template, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __string( name, skb->dev->name ) + __field( unsigned int, napi_id ) + __field( u16, queue_mapping ) + __field( const void *, skbaddr ) + __field( bool, vlan_tagged ) + __field( u16, vlan_proto ) + __field( u16, vlan_tci ) + __field( u16, protocol ) + __field( u8, ip_summed ) + __field( u32, hash ) + __field( bool, l4_hash ) + __field( unsigned int, len ) + __field( unsigned int, data_len ) + __field( unsigned int, truesize ) + __field( bool, mac_header_valid) + __field( int, mac_header ) + __field( unsigned char, nr_frags ) + __field( u16, gso_size ) + __field( u16, gso_type ) + ), + + TP_fast_assign( + __assign_str(name, skb->dev->name); +#ifdef CONFIG_NET_RX_BUSY_POLL + __entry->napi_id = skb->napi_id; +#else + __entry->napi_id = 0; +#endif + __entry->queue_mapping = skb->queue_mapping; + __entry->skbaddr = skb; + __entry->vlan_tagged = skb_vlan_tag_present(skb); + __entry->vlan_proto = ntohs(skb->vlan_proto); + __entry->vlan_tci = skb_vlan_tag_get(skb); + __entry->protocol = ntohs(skb->protocol); + __entry->ip_summed = skb->ip_summed; + __entry->hash = skb->hash; + __entry->l4_hash = skb->l4_hash; + __entry->len = skb->len; + __entry->data_len = skb->data_len; + __entry->truesize = skb->truesize; + __entry->mac_header_valid = skb_mac_header_was_set(skb); + __entry->mac_header = skb_mac_header(skb) - skb->data; + __entry->nr_frags = skb_shinfo(skb)->nr_frags; + __entry->gso_size = skb_shinfo(skb)->gso_size; + __entry->gso_type = skb_shinfo(skb)->gso_type; + ), + + TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x", + __get_str(name), __entry->napi_id, __entry->queue_mapping, + __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto, + __entry->vlan_tci, __entry->protocol, __entry->ip_summed, + __entry->hash, __entry->l4_hash, __entry->len, + __entry->data_len, __entry->truesize, + __entry->mac_header_valid, __entry->mac_header, + __entry->nr_frags, __entry->gso_size, __entry->gso_type) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + +#endif /* _TRACE_NET_H */ + +/* This part must be 
outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/nmi.h b/kernel/include/trace/events/nmi.h new file mode 100644 index 000000000..da3ee96b8 --- /dev/null +++ b/kernel/include/trace/events/nmi.h @@ -0,0 +1,37 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nmi + +#if !defined(_TRACE_NMI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NMI_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(nmi_handler, + + TP_PROTO(void *handler, s64 delta_ns, int handled), + + TP_ARGS(handler, delta_ns, handled), + + TP_STRUCT__entry( + __field( void *, handler ) + __field( s64, delta_ns) + __field( int, handled ) + ), + + TP_fast_assign( + __entry->handler = handler; + __entry->delta_ns = delta_ns; + __entry->handled = handled; + ), + + TP_printk("%ps() delta_ns: %lld handled: %d", + __entry->handler, + __entry->delta_ns, + __entry->handled) +); + +#endif /* _TRACE_NMI_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/oom.h b/kernel/include/trace/events/oom.h new file mode 100644 index 000000000..1e9749837 --- /dev/null +++ b/kernel/include/trace/events/oom.h @@ -0,0 +1,33 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM oom + +#if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_OOM_H +#include <linux/tracepoint.h> + +TRACE_EVENT(oom_score_adj_update, + + TP_PROTO(struct task_struct *task), + + TP_ARGS(task), + + TP_STRUCT__entry( + __field( pid_t, pid) + __array( char, comm, TASK_COMM_LEN ) + __field( short, oom_score_adj) + ), + + TP_fast_assign( + __entry->pid = task->pid; + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->oom_score_adj = task->signal->oom_score_adj; + ), + + TP_printk("pid=%d comm=%s oom_score_adj=%hd", + __entry->pid, __entry->comm, __entry->oom_score_adj) +); + +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/pagemap.h b/kernel/include/trace/events/pagemap.h new file mode 100644 index 000000000..ce0803b8d --- /dev/null +++ b/kernel/include/trace/events/pagemap.h @@ -0,0 +1,87 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pagemap + +#if !defined(_TRACE_PAGEMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGEMAP_H + +#include <linux/tracepoint.h> +#include <linux/mm.h> + +#define PAGEMAP_MAPPED 0x0001u +#define PAGEMAP_ANONYMOUS 0x0002u +#define PAGEMAP_FILE 0x0004u +#define PAGEMAP_SWAPCACHE 0x0008u +#define PAGEMAP_SWAPBACKED 0x0010u +#define PAGEMAP_MAPPEDDISK 0x0020u +#define PAGEMAP_BUFFERS 0x0040u + +#define trace_pagemap_flags(page) ( \ + (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \ + (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \ + (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \ + (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \ + (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \ + (page_has_private(page) ?
PAGEMAP_BUFFERS : 0) \ + ) + +TRACE_EVENT(mm_lru_insertion, + + TP_PROTO( + struct page *page, + int lru + ), + + TP_ARGS(page, lru), + + TP_STRUCT__entry( + __field(struct page *, page ) + __field(unsigned long, pfn ) + __field(int, lru ) + __field(unsigned long, flags ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->pfn = page_to_pfn(page); + __entry->lru = lru; + __entry->flags = trace_pagemap_flags(page); + ), + + /* Flag format is based on page-types.c formatting for pagemap */ + TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s", + __entry->page, + __entry->pfn, + __entry->lru, + __entry->flags & PAGEMAP_MAPPED ? "M" : " ", + __entry->flags & PAGEMAP_ANONYMOUS ? "a" : "f", + __entry->flags & PAGEMAP_SWAPCACHE ? "s" : " ", + __entry->flags & PAGEMAP_SWAPBACKED ? "b" : " ", + __entry->flags & PAGEMAP_MAPPEDDISK ? "d" : " ", + __entry->flags & PAGEMAP_BUFFERS ? "B" : " ") +); + +TRACE_EVENT(mm_lru_activate, + + TP_PROTO(struct page *page), + + TP_ARGS(page), + + TP_STRUCT__entry( + __field(struct page *, page ) + __field(unsigned long, pfn ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->pfn = page_to_pfn(page); + ), + + /* Flag format is based on page-types.c formatting for pagemap */ + TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn) + +); + +#endif /* _TRACE_PAGEMAP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/power.h b/kernel/include/trace/events/power.h new file mode 100644 index 000000000..d19840b0c --- /dev/null +++ b/kernel/include/trace/events/power.h @@ -0,0 +1,473 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM power + +#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_POWER_H + +#include <linux/ktime.h> +#include <linux/pm_qos.h> +#include <linux/tracepoint.h> +#include <linux/ftrace_event.h> + +#define TPS(x) tracepoint_string(x) + +DECLARE_EVENT_CLASS(cpu, + + TP_PROTO(unsigned int state, unsigned int cpu_id), + + TP_ARGS(state, cpu_id), + + TP_STRUCT__entry( + __field( u32, state ) + __field( u32, cpu_id ) + ), + + TP_fast_assign( + __entry->state = state; + __entry->cpu_id = cpu_id; + ), + + TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state, + (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(cpu, cpu_idle, + + TP_PROTO(unsigned int state, unsigned int cpu_id), + + TP_ARGS(state, cpu_id) +); + +TRACE_EVENT(pstate_sample, + + TP_PROTO(u32 core_busy, + u32 scaled_busy, + u32 state, + u64 mperf, + u64 aperf, + u32 freq + ), + + TP_ARGS(core_busy, + scaled_busy, + state, + mperf, + aperf, + freq + ), + + TP_STRUCT__entry( + __field(u32, core_busy) + __field(u32, scaled_busy) + __field(u32, state) + __field(u64, mperf) + __field(u64, aperf) + __field(u32, freq) + + ), + + TP_fast_assign( + __entry->core_busy = core_busy; + __entry->scaled_busy = scaled_busy; + __entry->state = state; + __entry->mperf = mperf; + __entry->aperf = aperf; + __entry->freq = freq; + ), + + TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ", + (unsigned long)__entry->core_busy, + (unsigned long)__entry->scaled_busy, + (unsigned long)__entry->state, + (unsigned long long)__entry->mperf, + (unsigned long long)__entry->aperf, + (unsigned long)__entry->freq + ) + +); + +/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */ +#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING +#define _PWR_EVENT_AVOID_DOUBLE_DEFINING + +#define PWR_EVENT_EXIT -1 +#endif + +#define pm_verb_symbolic(event) \ + 
__print_symbolic(event, \ + { PM_EVENT_SUSPEND, "suspend" }, \ + { PM_EVENT_RESUME, "resume" }, \ + { PM_EVENT_FREEZE, "freeze" }, \ + { PM_EVENT_QUIESCE, "quiesce" }, \ + { PM_EVENT_HIBERNATE, "hibernate" }, \ + { PM_EVENT_THAW, "thaw" }, \ + { PM_EVENT_RESTORE, "restore" }, \ + { PM_EVENT_RECOVER, "recover" }) + +DEFINE_EVENT(cpu, cpu_frequency, + + TP_PROTO(unsigned int frequency, unsigned int cpu_id), + + TP_ARGS(frequency, cpu_id) +); + +TRACE_EVENT(device_pm_callback_start, + + TP_PROTO(struct device *dev, const char *pm_ops, int event), + + TP_ARGS(dev, pm_ops, event), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __string(driver, dev_driver_string(dev)) + __string(parent, dev->parent ? dev_name(dev->parent) : "none") + __string(pm_ops, pm_ops ? pm_ops : "none ") + __field(int, event) + ), + + TP_fast_assign( + __assign_str(device, dev_name(dev)); + __assign_str(driver, dev_driver_string(dev)); + __assign_str(parent, + dev->parent ? dev_name(dev->parent) : "none"); + __assign_str(pm_ops, pm_ops ? pm_ops : "none "); + __entry->event = event; + ), + + TP_printk("%s %s, parent: %s, %s[%s]", __get_str(driver), + __get_str(device), __get_str(parent), __get_str(pm_ops), + pm_verb_symbolic(__entry->event)) +); + +TRACE_EVENT(device_pm_callback_end, + + TP_PROTO(struct device *dev, int error), + + TP_ARGS(dev, error), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __string(driver, dev_driver_string(dev)) + __field(int, error) + ), + + TP_fast_assign( + __assign_str(device, dev_name(dev)); + __assign_str(driver, dev_driver_string(dev)); + __entry->error = error; + ), + + TP_printk("%s %s, err=%d", + __get_str(driver), __get_str(device), __entry->error) +); + +TRACE_EVENT(suspend_resume, + + TP_PROTO(const char *action, int val, bool start), + + TP_ARGS(action, val, start), + + TP_STRUCT__entry( + __field(const char *, action) + __field(int, val) + __field(bool, start) + ), + + TP_fast_assign( + __entry->action = action; + __entry->val = val; + __entry->start = start; + ), + + TP_printk("%s[%u] %s", __entry->action, (unsigned int)__entry->val, + (__entry->start)?"begin":"end") +); + +DECLARE_EVENT_CLASS(wakeup_source, + + TP_PROTO(const char *name, unsigned int state), + + TP_ARGS(name, state), + + TP_STRUCT__entry( + __string( name, name ) + __field( u64, state ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->state = state; + ), + + TP_printk("%s state=0x%lx", __get_str(name), + (unsigned long)__entry->state) +); + +DEFINE_EVENT(wakeup_source, wakeup_source_activate, + + TP_PROTO(const char *name, unsigned int state), + + TP_ARGS(name, state) +); + +DEFINE_EVENT(wakeup_source, wakeup_source_deactivate, + + TP_PROTO(const char *name, unsigned int state), + + TP_ARGS(name, state) +); + +/* + * The clock events are used for clock enable/disable and for + * clock rate change + */ +DECLARE_EVENT_CLASS(clock, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id), + + TP_STRUCT__entry( + __string( name, name ) + __field( u64, state ) + __field( u64, cpu_id ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->state = state; + __entry->cpu_id = cpu_id; + ), + + TP_printk("%s state=%lu cpu_id=%lu", __get_str(name), + (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(clock, clock_enable, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +DEFINE_EVENT(clock, clock_disable, + + TP_PROTO(const char *name, 
unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +DEFINE_EVENT(clock, clock_set_rate, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +/* + * The power domain events are used for power domains transitions + */ +DECLARE_EVENT_CLASS(power_domain, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id), + + TP_STRUCT__entry( + __string( name, name ) + __field( u64, state ) + __field( u64, cpu_id ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->state = state; + __entry->cpu_id = cpu_id; +), + + TP_printk("%s state=%lu cpu_id=%lu", __get_str(name), + (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(power_domain, power_domain_target, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +/* + * The pm qos events are used for pm qos update + */ +DECLARE_EVENT_CLASS(pm_qos_request, + + TP_PROTO(int pm_qos_class, s32 value), + + TP_ARGS(pm_qos_class, value), + + TP_STRUCT__entry( + __field( int, pm_qos_class ) + __field( s32, value ) + ), + + TP_fast_assign( + __entry->pm_qos_class = pm_qos_class; + __entry->value = value; + ), + + TP_printk("pm_qos_class=%s value=%d", + __print_symbolic(__entry->pm_qos_class, + { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }, + { PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" }, + { PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }), + __entry->value) +); + +DEFINE_EVENT(pm_qos_request, pm_qos_add_request, + + TP_PROTO(int pm_qos_class, s32 value), + + TP_ARGS(pm_qos_class, value) +); + +DEFINE_EVENT(pm_qos_request, pm_qos_update_request, + + TP_PROTO(int pm_qos_class, s32 value), + + TP_ARGS(pm_qos_class, value) +); + +DEFINE_EVENT(pm_qos_request, pm_qos_remove_request, + + TP_PROTO(int pm_qos_class, s32 value), + + TP_ARGS(pm_qos_class, value) +); + +TRACE_EVENT(pm_qos_update_request_timeout, + + TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us), + + TP_ARGS(pm_qos_class, value, timeout_us), + + TP_STRUCT__entry( + __field( int, pm_qos_class ) + __field( s32, value ) + __field( unsigned long, timeout_us ) + ), + + TP_fast_assign( + __entry->pm_qos_class = pm_qos_class; + __entry->value = value; + __entry->timeout_us = timeout_us; + ), + + TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld", + __print_symbolic(__entry->pm_qos_class, + { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }, + { PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" }, + { PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }), + __entry->value, __entry->timeout_us) +); + +DECLARE_EVENT_CLASS(pm_qos_update, + + TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), + + TP_ARGS(action, prev_value, curr_value), + + TP_STRUCT__entry( + __field( enum pm_qos_req_action, action ) + __field( int, prev_value ) + __field( int, curr_value ) + ), + + TP_fast_assign( + __entry->action = action; + __entry->prev_value = prev_value; + __entry->curr_value = curr_value; + ), + + TP_printk("action=%s prev_value=%d curr_value=%d", + __print_symbolic(__entry->action, + { PM_QOS_ADD_REQ, "ADD_REQ" }, + { PM_QOS_UPDATE_REQ, "UPDATE_REQ" }, + { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }), + __entry->prev_value, __entry->curr_value) +); + +DEFINE_EVENT(pm_qos_update, pm_qos_update_target, + + TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), + + TP_ARGS(action, prev_value, curr_value) +); + +DEFINE_EVENT_PRINT(pm_qos_update, 
pm_qos_update_flags, + + TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), + + TP_ARGS(action, prev_value, curr_value), + + TP_printk("action=%s prev_value=0x%x curr_value=0x%x", + __print_symbolic(__entry->action, + { PM_QOS_ADD_REQ, "ADD_REQ" }, + { PM_QOS_UPDATE_REQ, "UPDATE_REQ" }, + { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }), + __entry->prev_value, __entry->curr_value) +); + +DECLARE_EVENT_CLASS(dev_pm_qos_request, + + TP_PROTO(const char *name, enum dev_pm_qos_req_type type, + s32 new_value), + + TP_ARGS(name, type, new_value), + + TP_STRUCT__entry( + __string( name, name ) + __field( enum dev_pm_qos_req_type, type ) + __field( s32, new_value ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->type = type; + __entry->new_value = new_value; + ), + + TP_printk("device=%s type=%s new_value=%d", + __get_str(name), + __print_symbolic(__entry->type, + { DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" }, + { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }), + __entry->new_value) +); + +DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request, + + TP_PROTO(const char *name, enum dev_pm_qos_req_type type, + s32 new_value), + + TP_ARGS(name, type, new_value) +); + +DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request, + + TP_PROTO(const char *name, enum dev_pm_qos_req_type type, + s32 new_value), + + TP_ARGS(name, type, new_value) +); + +DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request, + + TP_PROTO(const char *name, enum dev_pm_qos_req_type type, + s32 new_value), + + TP_ARGS(name, type, new_value) +); +#endif /* _TRACE_POWER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/power_cpu_migrate.h b/kernel/include/trace/events/power_cpu_migrate.h new file mode 100644 index 000000000..f76dd4de6 --- /dev/null +++ b/kernel/include/trace/events/power_cpu_migrate.h @@ -0,0 +1,67 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM power + +#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_POWER_CPU_MIGRATE_H + +#include <linux/tracepoint.h> + +#define __cpu_migrate_proto \ + TP_PROTO(u64 timestamp, \ + u32 cpu_hwid) +#define __cpu_migrate_args \ + TP_ARGS(timestamp, \ + cpu_hwid) + +DECLARE_EVENT_CLASS(cpu_migrate, + + __cpu_migrate_proto, + __cpu_migrate_args, + + TP_STRUCT__entry( + __field(u64, timestamp ) + __field(u32, cpu_hwid ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->cpu_hwid = cpu_hwid; + ), + + TP_printk("timestamp=%llu cpu_hwid=0x%08lX", + (unsigned long long)__entry->timestamp, + (unsigned long)__entry->cpu_hwid + ) +); + +#define __define_cpu_migrate_event(name) \ + DEFINE_EVENT(cpu_migrate, cpu_migrate_##name, \ + __cpu_migrate_proto, \ + __cpu_migrate_args \ + ) + +__define_cpu_migrate_event(begin); +__define_cpu_migrate_event(finish); +__define_cpu_migrate_event(current); + +#undef __define_cpu_migrate +#undef __cpu_migrate_proto +#undef __cpu_migrate_args + +/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */ +#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING +#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING + +/* + * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate + * a whole-cluster migration: + */ +#define CPU_MIGRATE_ALL_CPUS 0x80000000U +#endif + +#endif /* _TRACE_POWER_CPU_MIGRATE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE power_cpu_migrate +#include <trace/define_trace.h> diff 
--git a/kernel/include/trace/events/printk.h b/kernel/include/trace/events/printk.h new file mode 100644 index 000000000..c008bc99f --- /dev/null +++ b/kernel/include/trace/events/printk.h @@ -0,0 +1,28 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM printk + +#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PRINTK_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(console, + TP_PROTO(const char *text, size_t len), + + TP_ARGS(text, len), + + TP_STRUCT__entry( + __dynamic_array(char, msg, len + 1) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(msg), text, len); + ((char *)__get_dynamic_array(msg))[len] = 0; + ), + + TP_printk("%s", __get_str(msg)) +); +#endif /* _TRACE_PRINTK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/random.h b/kernel/include/trace/events/random.h new file mode 100644 index 000000000..4684de344 --- /dev/null +++ b/kernel/include/trace/events/random.h @@ -0,0 +1,315 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM random + +#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RANDOM_H + +#include <linux/writeback.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(add_device_randomness, + TP_PROTO(int bytes, unsigned long IP), + + TP_ARGS(bytes, IP), + + TP_STRUCT__entry( + __field( int, bytes ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->bytes = bytes; + __entry->IP = IP; + ), + + TP_printk("bytes %d caller %pS", + __entry->bytes, (void *)__entry->IP) +); + +DECLARE_EVENT_CLASS(random__mix_pool_bytes, + TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + + TP_ARGS(pool_name, bytes, IP), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, bytes ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->bytes = bytes; + __entry->IP = IP; + ), + + TP_printk("%s pool: bytes %d caller %pS", + __entry->pool_name, __entry->bytes, (void *)__entry->IP) +); + +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, + TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + + TP_ARGS(pool_name, bytes, IP) +); + +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, + TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + + TP_ARGS(pool_name, bytes, IP) +); + +TRACE_EVENT(credit_entropy_bits, + TP_PROTO(const char *pool_name, int bits, int entropy_count, + int entropy_total, unsigned long IP), + + TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, bits ) + __field( int, entropy_count ) + __field( int, entropy_total ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->bits = bits; + __entry->entropy_count = entropy_count; + __entry->entropy_total = entropy_total; + __entry->IP = IP; + ), + + TP_printk("%s pool: bits %d entropy_count %d entropy_total %d " + "caller %pS", __entry->pool_name, __entry->bits, + __entry->entropy_count, __entry->entropy_total, + (void *)__entry->IP) +); + +TRACE_EVENT(push_to_pool, + TP_PROTO(const char *pool_name, int pool_bits, int input_bits), + + TP_ARGS(pool_name, pool_bits, input_bits), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, pool_bits ) + __field( int, input_bits ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->pool_bits = pool_bits; + __entry->input_bits = input_bits; + ), + + TP_printk("%s: pool_bits %d 
input_pool_bits %d", + __entry->pool_name, __entry->pool_bits, + __entry->input_bits) +); + +TRACE_EVENT(debit_entropy, + TP_PROTO(const char *pool_name, int debit_bits), + + TP_ARGS(pool_name, debit_bits), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, debit_bits ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->debit_bits = debit_bits; + ), + + TP_printk("%s: debit_bits %d", __entry->pool_name, + __entry->debit_bits) +); + +TRACE_EVENT(add_input_randomness, + TP_PROTO(int input_bits), + + TP_ARGS(input_bits), + + TP_STRUCT__entry( + __field( int, input_bits ) + ), + + TP_fast_assign( + __entry->input_bits = input_bits; + ), + + TP_printk("input_pool_bits %d", __entry->input_bits) +); + +TRACE_EVENT(add_disk_randomness, + TP_PROTO(dev_t dev, int input_bits), + + TP_ARGS(dev, input_bits), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, input_bits ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->input_bits = input_bits; + ), + + TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), + MINOR(__entry->dev), __entry->input_bits) +); + +TRACE_EVENT(xfer_secondary_pool, + TP_PROTO(const char *pool_name, int xfer_bits, int request_bits, + int pool_entropy, int input_entropy), + + TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, + input_entropy), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, xfer_bits ) + __field( int, request_bits ) + __field( int, pool_entropy ) + __field( int, input_entropy ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->xfer_bits = xfer_bits; + __entry->request_bits = request_bits; + __entry->pool_entropy = pool_entropy; + __entry->input_entropy = input_entropy; + ), + + TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " + "input_entropy %d", __entry->pool_name, __entry->xfer_bits, + __entry->request_bits, __entry->pool_entropy, + __entry->input_entropy) +); + +DECLARE_EVENT_CLASS(random__get_random_bytes, + TP_PROTO(int nbytes, unsigned long IP), + + TP_ARGS(nbytes, IP), + + TP_STRUCT__entry( + __field( int, nbytes ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->nbytes = nbytes; + __entry->IP = IP; + ), + + TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP) +); + +DEFINE_EVENT(random__get_random_bytes, get_random_bytes, + TP_PROTO(int nbytes, unsigned long IP), + + TP_ARGS(nbytes, IP) +); + +DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, + TP_PROTO(int nbytes, unsigned long IP), + + TP_ARGS(nbytes, IP) +); + +DECLARE_EVENT_CLASS(random__extract_entropy, + TP_PROTO(const char *pool_name, int nbytes, int entropy_count, + unsigned long IP), + + TP_ARGS(pool_name, nbytes, entropy_count, IP), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, nbytes ) + __field( int, entropy_count ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->nbytes = nbytes; + __entry->entropy_count = entropy_count; + __entry->IP = IP; + ), + + TP_printk("%s pool: nbytes %d entropy_count %d caller %pS", + __entry->pool_name, __entry->nbytes, __entry->entropy_count, + (void *)__entry->IP) +); + + +DEFINE_EVENT(random__extract_entropy, extract_entropy, + TP_PROTO(const char *pool_name, int nbytes, int entropy_count, + unsigned long IP), + + TP_ARGS(pool_name, nbytes, entropy_count, IP) +); + +DEFINE_EVENT(random__extract_entropy, extract_entropy_user, + TP_PROTO(const char *pool_name, int nbytes, int entropy_count, + 
unsigned long IP), + + TP_ARGS(pool_name, nbytes, entropy_count, IP) +); + +TRACE_EVENT(random_read, + TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left), + + TP_ARGS(got_bits, need_bits, pool_left, input_left), + + TP_STRUCT__entry( + __field( int, got_bits ) + __field( int, need_bits ) + __field( int, pool_left ) + __field( int, input_left ) + ), + + TP_fast_assign( + __entry->got_bits = got_bits; + __entry->need_bits = need_bits; + __entry->pool_left = pool_left; + __entry->input_left = input_left; + ), + + TP_printk("got_bits %d still_needed_bits %d " + "blocking_pool_entropy_left %d input_entropy_left %d", + __entry->got_bits, __entry->need_bits, __entry->pool_left, + __entry->input_left) +); + +TRACE_EVENT(urandom_read, + TP_PROTO(int got_bits, int pool_left, int input_left), + + TP_ARGS(got_bits, pool_left, input_left), + + TP_STRUCT__entry( + __field( int, got_bits ) + __field( int, pool_left ) + __field( int, input_left ) + ), + + TP_fast_assign( + __entry->got_bits = got_bits; + __entry->pool_left = pool_left; + __entry->input_left = input_left; + ), + + TP_printk("got_bits %d nonblocking_pool_entropy_left %d " + "input_entropy_left %d", __entry->got_bits, + __entry->pool_left, __entry->input_left) +); + +#endif /* _TRACE_RANDOM_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/rcu.h b/kernel/include/trace/events/rcu.h new file mode 100644 index 000000000..c78e88ce5 --- /dev/null +++ b/kernel/include/trace/events/rcu.h @@ -0,0 +1,740 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rcu + +#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RCU_H + +#include <linux/tracepoint.h> + +/* + * Tracepoint for start/end markers used for utilization calculations. + * By convention, the string is of the following forms: + * + * "Start <activity>" -- Mark the start of the specified activity, + * such as "context switch". Nesting is permitted. + * "End <activity>" -- Mark the end of the specified activity. + * + * An "@" character within "<activity>" is a comment character: Data + * reduction scripts will ignore the "@" and the remainder of the line. + */ +TRACE_EVENT(rcu_utilization, + + TP_PROTO(const char *s), + + TP_ARGS(s), + + TP_STRUCT__entry( + __field(const char *, s) + ), + + TP_fast_assign( + __entry->s = s; + ), + + TP_printk("%s", __entry->s) +); + +#ifdef CONFIG_RCU_TRACE + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) + +/* + * Tracepoint for grace-period events. Takes a string identifying the + * RCU flavor, the grace-period number, and a string identifying the + * grace-period-related event as follows: + * + * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL. + * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. + * "newreq": Request a new grace period. + * "start": Start a grace period. + * "cpustart": CPU first notices a grace-period start. + * "cpuqs": CPU passes through a quiescent state. + * "cpuonl": CPU comes online. + * "cpuofl": CPU goes offline. + * "reqwait": GP kthread sleeps waiting for grace-period request. + * "reqwaitsig": GP kthread awakened by signal from reqwait state. + * "fqswait": GP kthread waiting until time to force quiescent states. + * "fqsstart": GP kthread starts forcing quiescent states. + * "fqsend": GP kthread done forcing quiescent states. + * "fqswaitsig": GP kthread awakened by signal from fqswait state. + * "end": End a grace period.
+ * "cpuend": CPU first notices a grace-period end. + */ +TRACE_EVENT(rcu_grace_period, + + TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent), + + TP_ARGS(rcuname, gpnum, gpevent), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(const char *, gpevent) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->gpevent = gpevent; + ), + + TP_printk("%s %lu %s", + __entry->rcuname, __entry->gpnum, __entry->gpevent) +); + +/* + * Tracepoint for future grace-period events, including those for no-callbacks + * CPUs. The caller should pull the data from the rcu_node structure, + * other than rcuname, which comes from the rcu_state structure, and event, + * which is one of the following: + * + * "Startleaf": Request a nocb grace period based on leaf-node data. + * "Startedleaf": Leaf-node start proved sufficient. + * "Startedleafroot": Leaf-node start proved sufficient after checking root. + * "Startedroot": Requested a nocb grace period based on root-node data. + * "StartWait": Start waiting for the requested grace period. + * "ResumeWait": Resume waiting after signal. + * "EndWait": Complete wait. + * "Cleanup": Clean up rcu_node structure after previous GP. + * "CleanupMore": Clean up, and another no-CB GP is needed. + */ +TRACE_EVENT(rcu_future_grace_period, + + TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed, + unsigned long c, u8 level, int grplo, int grphi, + const char *gpevent), + + TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(unsigned long, completed) + __field(unsigned long, c) + __field(u8, level) + __field(int, grplo) + __field(int, grphi) + __field(const char *, gpevent) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->completed = completed; + __entry->c = c; + __entry->level = level; + __entry->grplo = grplo; + __entry->grphi = grphi; + __entry->gpevent = gpevent; + ), + + TP_printk("%s %lu %lu %lu %u %d %d %s", + __entry->rcuname, __entry->gpnum, __entry->completed, + __entry->c, __entry->level, __entry->grplo, __entry->grphi, + __entry->gpevent) +); + +/* + * Tracepoint for grace-period-initialization events. These are + * distinguished by the type of RCU, the new grace-period number, the + * rcu_node structure level, the starting and ending CPU covered by the + * rcu_node structure, and the mask of CPUs that will be waited for. + * All but the type of RCU are extracted from the rcu_node structure. + */ +TRACE_EVENT(rcu_grace_period_init, + + TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level, + int grplo, int grphi, unsigned long qsmask), + + TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(u8, level) + __field(int, grplo) + __field(int, grphi) + __field(unsigned long, qsmask) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->level = level; + __entry->grplo = grplo; + __entry->grphi = grphi; + __entry->qsmask = qsmask; + ), + + TP_printk("%s %lu %u %d %d %lx", + __entry->rcuname, __entry->gpnum, __entry->level, + __entry->grplo, __entry->grphi, __entry->qsmask) +); + +/* + * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended + * to assist debugging of these handoffs. 
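+ * + * A minimal illustrative call (a sketch only; "rcuname" and "cpu" stand + * in for whatever the real no-CBs callers pass, and the reason string + * would normally go through tracepoint_string() so it can be resolved + * in the trace output): + * + *	trace_rcu_nocb_wake(rcuname, cpu, tracepoint_string("WakeEmpty"));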
+ * + * The first argument is the name of the RCU flavor, and the second is + * the number of the offloaded CPU. The third and final + * argument is a string as follows: + * + * "WakeEmpty": Wake rcuo kthread, first CB to empty list. + * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list. + * "WakeOvf": Wake rcuo kthread, CB list is huge. + * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge. + * "WakeNot": Don't wake rcuo kthread. + * "WakeNotPoll": Don't wake rcuo kthread because it is polling. + * "DeferredWake": Carried out the "IsDeferred" wakeup. + * "Poll": Start of new polling cycle for rcu_nocb_poll. + * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. + * "WokeEmpty": rcuo kthread woke to find empty list. + * "WokeNonEmpty": rcuo kthread woke to find non-empty list. + * "WaitQueue": Enqueue partially done, timed wait for it to complete. + * "WokeQueue": Partial enqueue now complete. + */ +TRACE_EVENT(rcu_nocb_wake, + + TP_PROTO(const char *rcuname, int cpu, const char *reason), + + TP_ARGS(rcuname, cpu, reason), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(int, cpu) + __field(const char *, reason) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->cpu = cpu; + __entry->reason = reason; + ), + + TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason) +); + +/* + * Tracepoint for tasks blocking within preemptible-RCU read-side + * critical sections. Track the type of RCU (which one day might + * include SRCU), the grace-period number that the task is blocking + * (the current or the next), and the task's PID. + */ +TRACE_EVENT(rcu_preempt_task, + + TP_PROTO(const char *rcuname, int pid, unsigned long gpnum), + + TP_ARGS(rcuname, pid, gpnum), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(int, pid) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->pid = pid; + ), + + TP_printk("%s %lu %d", + __entry->rcuname, __entry->gpnum, __entry->pid) +); + +/* + * Tracepoint for tasks that blocked within a given preemptible-RCU + * read-side critical section exiting that critical section. Track the + * type of RCU (which one day might include SRCU) and the task's PID. + */ +TRACE_EVENT(rcu_unlock_preempted_task, + + TP_PROTO(const char *rcuname, unsigned long gpnum, int pid), + + TP_ARGS(rcuname, gpnum, pid), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(int, pid) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->pid = pid; + ), + + TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) +); + +/* + * Tracepoint for quiescent-state-reporting events. These are + * distinguished by the type of RCU, the grace-period number, the + * mask of quiescent lower-level entities, the rcu_node structure level, + * the starting and ending CPU covered by the rcu_node structure, and + * whether there are any blocked tasks blocking the current grace period. + * All but the type of RCU are extracted from the rcu_node structure.
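+ * + * For example (hypothetical values), a line printed by the TP_printk() + * below as + * + *	rcu_preempt 42 3>1 1 0 15 0 + * + * decodes positionally as gpnum=42, mask=0x3, qsmask=0x1 (shown as + * 3>1), level=1, grplo=0, grphi=15, gp_tasks=0.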
+ */ +TRACE_EVENT(rcu_quiescent_state_report, + + TP_PROTO(const char *rcuname, unsigned long gpnum, + unsigned long mask, unsigned long qsmask, + u8 level, int grplo, int grphi, int gp_tasks), + + TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(unsigned long, mask) + __field(unsigned long, qsmask) + __field(u8, level) + __field(int, grplo) + __field(int, grphi) + __field(u8, gp_tasks) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->mask = mask; + __entry->qsmask = qsmask; + __entry->level = level; + __entry->grplo = grplo; + __entry->grphi = grphi; + __entry->gp_tasks = gp_tasks; + ), + + TP_printk("%s %lu %lx>%lx %u %d %d %u", + __entry->rcuname, __entry->gpnum, + __entry->mask, __entry->qsmask, __entry->level, + __entry->grplo, __entry->grphi, __entry->gp_tasks) +); + +/* + * Tracepoint for quiescent states detected by force_quiescent_state(). + * These trace events include the type of RCU, the grace-period number + * that was blocked by the CPU, the CPU itself, and the type of quiescent + * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, + * or "kick" when kicking a CPU that has been in dyntick-idle mode for + * too long. + */ +TRACE_EVENT(rcu_fqs, + + TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent), + + TP_ARGS(rcuname, gpnum, cpu, qsevent), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, gpnum) + __field(int, cpu) + __field(const char *, qsevent) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->gpnum = gpnum; + __entry->cpu = cpu; + __entry->qsevent = qsevent; + ), + + TP_printk("%s %lu %d %s", + __entry->rcuname, __entry->gpnum, + __entry->cpu, __entry->qsevent) +); + +#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */ + +/* + * Tracepoint for dyntick-idle entry/exit events. These take a string + * as argument: "Start" for entering dyntick-idle mode, "End" for + * leaving it, "--=" for events moving towards idle, and "++=" for events + * moving away from idle. "Error on entry: not idle task" and "Error on + * exit: not idle task" indicate that a non-idle task is erroneously + * toying with the idle loop. + * + * These events also take a pair of numbers, which indicate the nesting + * depth before and after the event of interest. Note that task-related + * events use the upper bits of each number, while interrupt-related + * events use the lower bits. + */ +TRACE_EVENT(rcu_dyntick, + + TP_PROTO(const char *polarity, long long oldnesting, long long newnesting), + + TP_ARGS(polarity, oldnesting, newnesting), + + TP_STRUCT__entry( + __field(const char *, polarity) + __field(long long, oldnesting) + __field(long long, newnesting) + ), + + TP_fast_assign( + __entry->polarity = polarity; + __entry->oldnesting = oldnesting; + __entry->newnesting = newnesting; + ), + + TP_printk("%s %llx %llx", __entry->polarity, + __entry->oldnesting, __entry->newnesting) +); + +/* + * Tracepoint for RCU preparation for idle, the goal being to get RCU + * processing done so that the current CPU can shut off its scheduling + * clock and enter dyntick-idle mode. One way to accomplish this is + * to drain all RCU callbacks from this CPU, and the other is to have + * done everything RCU requires for the current grace period. 
In this + * latter case, the CPU will be awakened at the end of the current grace + * period in order to process the remainder of its callbacks. + * + * These tracepoints take a string as argument: + * + * "No callbacks": Nothing to do, no callbacks on this CPU. + * "In holdoff": Nothing to do, holding off after unsuccessful attempt. + * "Begin holdoff": Attempt failed, don't retry until next jiffy. + * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. + * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. + * "More callbacks": Still more callbacks, try again to clear them out. + * "Callbacks drained": All callbacks processed, off to dyntick idle! + * "Timer": Timer fired to cause CPU to continue processing callbacks. + * "Demigrate": Timer fired on wrong CPU, woke up correct CPU. + * "Cleanup after idle": Idle exited, timer canceled. + */ +TRACE_EVENT(rcu_prep_idle, + + TP_PROTO(const char *reason), + + TP_ARGS(reason), + + TP_STRUCT__entry( + __field(const char *, reason) + ), + + TP_fast_assign( + __entry->reason = reason; + ), + + TP_printk("%s", __entry->reason) +); + +/* + * Tracepoint for the registration of a single RCU callback function. + * The first argument is the type of RCU, the second argument is + * a pointer to the RCU callback itself, the third element is the + * number of lazy callbacks queued, and the fourth element is the + * total number of callbacks queued. + */ +TRACE_EVENT(rcu_callback, + + TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, + long qlen), + + TP_ARGS(rcuname, rhp, qlen_lazy, qlen), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(void *, rhp) + __field(void *, func) + __field(long, qlen_lazy) + __field(long, qlen) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->rhp = rhp; + __entry->func = rhp->func; + __entry->qlen_lazy = qlen_lazy; + __entry->qlen = qlen; + ), + + TP_printk("%s rhp=%p func=%pf %ld/%ld", + __entry->rcuname, __entry->rhp, __entry->func, + __entry->qlen_lazy, __entry->qlen) +); + +/* + * Tracepoint for the registration of a single RCU callback of the special + * kfree() form. The first argument is the RCU type, the second argument + * is a pointer to the RCU callback, the third argument is the offset + * of the callback within the enclosing RCU-protected data structure, + * the fourth argument is the number of lazy callbacks queued, and the + * fifth argument is the total number of callbacks queued. + */ +TRACE_EVENT(rcu_kfree_callback, + + TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, + long qlen_lazy, long qlen), + + TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(void *, rhp) + __field(unsigned long, offset) + __field(long, qlen_lazy) + __field(long, qlen) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->rhp = rhp; + __entry->offset = offset; + __entry->qlen_lazy = qlen_lazy; + __entry->qlen = qlen; + ), + + TP_printk("%s rhp=%p func=%ld %ld/%ld", + __entry->rcuname, __entry->rhp, __entry->offset, + __entry->qlen_lazy, __entry->qlen) +); + +/* + * Tracepoint for marking the beginning rcu_do_batch, performed to start + * RCU callback invocation. The first argument is the RCU flavor, + * the second is the number of lazy callbacks queued, the third is + * the total number of callbacks queued, and the fourth argument is + * the current RCU-callback batch limit. 
+ */ +TRACE_EVENT(rcu_batch_start, + + TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), + + TP_ARGS(rcuname, qlen_lazy, qlen, blimit), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(long, qlen_lazy) + __field(long, qlen) + __field(long, blimit) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->qlen_lazy = qlen_lazy; + __entry->qlen = qlen; + __entry->blimit = blimit; + ), + + TP_printk("%s CBs=%ld/%ld bl=%ld", + __entry->rcuname, __entry->qlen_lazy, __entry->qlen, + __entry->blimit) +); + +/* + * Tracepoint for the invocation of a single RCU callback function. + * The first argument is the type of RCU, and the second argument is + * a pointer to the RCU callback itself. + */ +TRACE_EVENT(rcu_invoke_callback, + + TP_PROTO(const char *rcuname, struct rcu_head *rhp), + + TP_ARGS(rcuname, rhp), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(void *, rhp) + __field(void *, func) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->rhp = rhp; + __entry->func = rhp->func; + ), + + TP_printk("%s rhp=%p func=%pf", + __entry->rcuname, __entry->rhp, __entry->func) +); + +/* + * Tracepoint for the invocation of a single RCU callback of the special + * kfree() form. The first argument is the RCU flavor, the second + * argument is a pointer to the RCU callback, and the third argument + * is the offset of the callback within the enclosing RCU-protected + * data structure. + */ +TRACE_EVENT(rcu_invoke_kfree_callback, + + TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), + + TP_ARGS(rcuname, rhp, offset), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(void *, rhp) + __field(unsigned long, offset) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->rhp = rhp; + __entry->offset = offset; + ), + + TP_printk("%s rhp=%p func=%ld", + __entry->rcuname, __entry->rhp, __entry->offset) +); + +/* + * Tracepoint for exiting rcu_do_batch after RCU callbacks have been + * invoked. The first argument is the name of the RCU flavor, + * the second argument is number of callbacks actually invoked, + * the third argument (cb) is whether or not any of the callbacks that + * were ready to invoke at the beginning of this batch are still + * queued, the fourth argument (nr) is the return value of need_resched(), + * the fifth argument (iit) is 1 if the current task is the idle task, + * and the sixth argument (risk) is the return value from + * rcu_is_callbacks_kthread(). + */ +TRACE_EVENT(rcu_batch_end, + + TP_PROTO(const char *rcuname, int callbacks_invoked, + char cb, char nr, char iit, char risk), + + TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(int, callbacks_invoked) + __field(char, cb) + __field(char, nr) + __field(char, iit) + __field(char, risk) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->callbacks_invoked = callbacks_invoked; + __entry->cb = cb; + __entry->nr = nr; + __entry->iit = iit; + __entry->risk = risk; + ), + + TP_printk("%s CBs-invoked=%d idle=%c%c%c%c", + __entry->rcuname, __entry->callbacks_invoked, + __entry->cb ? 'C' : '.', + __entry->nr ? 'S' : '.', + __entry->iit ? 'I' : '.', + __entry->risk ? 'R' : '.') +); + +/* + * Tracepoint for rcutorture readers. The first argument is the name + * of the RCU flavor from rcutorture's viewpoint and the second argument + * is the callback address. 
+ */ +TRACE_EVENT(rcu_torture_read, + + TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, + unsigned long secs, unsigned long c_old, unsigned long c), + + TP_ARGS(rcutorturename, rhp, secs, c_old, c), + + TP_STRUCT__entry( + __field(const char *, rcutorturename) + __field(struct rcu_head *, rhp) + __field(unsigned long, secs) + __field(unsigned long, c_old) + __field(unsigned long, c) + ), + + TP_fast_assign( + __entry->rcutorturename = rcutorturename; + __entry->rhp = rhp; + __entry->secs = secs; + __entry->c_old = c_old; + __entry->c = c; + ), + + TP_printk("%s torture read %p %luus c: %lu %lu", + __entry->rcutorturename, __entry->rhp, + __entry->secs, __entry->c_old, __entry->c) +); + +/* + * Tracepoint for _rcu_barrier() execution. The string "s" describes + * the _rcu_barrier phase: + * "Begin": _rcu_barrier() started. + * "Check": _rcu_barrier() checking for piggybacking. + * "EarlyExit": _rcu_barrier() piggybacked, thus early exit. + * "Inc1": _rcu_barrier() piggyback check counter incremented. + * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU + * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU. + * "OnlineQ": _rcu_barrier() found online CPU with callbacks. + * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks. + * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. + * "CB": An rcu_barrier_callback() invoked a callback, not the last. + * "LastCB": An rcu_barrier_callback() invoked the last callback. + * "Inc2": _rcu_barrier() piggyback check counter incremented. + * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument + * is the count of remaining callbacks, and "done" is the piggybacking count. + */ +TRACE_EVENT(rcu_barrier, + + TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), + + TP_ARGS(rcuname, s, cpu, cnt, done), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(const char *, s) + __field(int, cpu) + __field(int, cnt) + __field(unsigned long, done) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->s = s; + __entry->cpu = cpu; + __entry->cnt = cnt; + __entry->done = done; + ), + + TP_printk("%s %s cpu %d remaining %d # %lu", + __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt, + __entry->done) +); + +#else /* #ifdef CONFIG_RCU_TRACE */ + +#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) +#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \ + qsmask) do { } while (0) +#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ + level, grplo, grphi, event) \ + do { } while (0) +#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) +#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) +#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) +#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ + grplo, grphi, gp_tasks) do { } \ + while (0) +#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) +#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) +#define trace_rcu_prep_idle(reason) do { } while (0) +#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) +#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ + do { } while (0) +#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \ + do { } while (0) +#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) +#define trace_rcu_invoke_kfree_callback(rcuname, rhp, 
offset) do { } while (0) +#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ + do { } while (0) +#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ + do { } while (0) +#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) + +#endif /* #else #ifdef CONFIG_RCU_TRACE */ + +#endif /* _TRACE_RCU_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/regulator.h b/kernel/include/trace/events/regulator.h new file mode 100644 index 000000000..37502a740 --- /dev/null +++ b/kernel/include/trace/events/regulator.h @@ -0,0 +1,141 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM regulator + +#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_REGULATOR_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +/* + * Events which just log themselves and the regulator name for enable/disable + * type tracking. + */ +DECLARE_EVENT_CLASS(regulator_basic, + + TP_PROTO(const char *name), + + TP_ARGS(name), + + TP_STRUCT__entry( + __string( name, name ) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("name=%s", __get_str(name)) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable_delay, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable_complete, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_disable, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_disable_complete, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +/* + * Events that take a range of numerical values, mostly for voltages + * and so on. + */ +DECLARE_EVENT_CLASS(regulator_range, + + TP_PROTO(const char *name, int min, int max), + + TP_ARGS(name, min, max), + + TP_STRUCT__entry( + __string( name, name ) + __field( int, min ) + __field( int, max ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->min = min; + __entry->max = max; + ), + + TP_printk("name=%s (%d-%d)", __get_str(name), + (int)__entry->min, (int)__entry->max) +); + +DEFINE_EVENT(regulator_range, regulator_set_voltage, + + TP_PROTO(const char *name, int min, int max), + + TP_ARGS(name, min, max) + +); + + +/* + * Events that take a single value, mostly for readback and refcounts. 
+ */
+DECLARE_EVENT_CLASS(regulator_value,
+
+	TP_PROTO(const char *name, unsigned int val),
+
+	TP_ARGS(name, val),
+
+	TP_STRUCT__entry(
+		__string( name, name )
+		__field( unsigned int, val )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->val = val;
+	),
+
+	TP_printk("name=%s, val=%u", __get_str(name),
+		(int)__entry->val)
+);
+
+DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
+
+	TP_PROTO(const char *name, unsigned int value),
+
+	TP_ARGS(name, value)
+
+);
+
+#endif /* _TRACE_REGULATOR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/rpm.h b/kernel/include/trace/events/rpm.h
new file mode 100644
index 000000000..33f85b68c
--- /dev/null
+++ b/kernel/include/trace/events/rpm.h
@@ -0,0 +1,100 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct device;
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags),
+
+	TP_STRUCT__entry(
+		__string( name, dev_name(dev) )
+		__field( int, flags )
+		__field( int , usage_count )
+		__field( int , disable_depth )
+		__field( int , runtime_auto )
+		__field( int , request_pending )
+		__field( int , irq_safe )
+		__field( int , child_count )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+		__entry->flags = flags;
+		__entry->usage_count = atomic_read(
+			&dev->power.usage_count);
+		__entry->disable_depth = dev->power.disable_depth;
+		__entry->runtime_auto = dev->power.runtime_auto;
+		__entry->request_pending = dev->power.request_pending;
+		__entry->irq_safe = dev->power.irq_safe;
+		__entry->child_count = atomic_read(
+			&dev->power.child_count);
+	),
+
+	TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+			" irq-%-1d child-%d",
+			__get_str(name), __entry->flags,
+			__entry->usage_count,
+			__entry->disable_depth,
+			__entry->runtime_auto,
+			__entry->request_pending,
+			__entry->irq_safe,
+			__entry->child_count
+	)
+);
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags)
+);
+
+TRACE_EVENT(rpm_return_int,
+	TP_PROTO(struct device *dev, unsigned long ip, int ret),
+	TP_ARGS(dev, ip, ret),
+
+	TP_STRUCT__entry(
+		__string( name, dev_name(dev))
+		__field( unsigned long, ip )
+		__field( int, ret )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+		__entry->ip = ip;
+		__entry->ret = ret;
+	),
+
+	TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+		__entry->ret)
+);
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/sched.h b/kernel/include/trace/events/sched.h
new file mode 100644
index 000000000..30fedaf3e
--- /dev/null
+++ b/kernel/include/trace/events/sched.h
@@ -0,0 +1,559 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+
+#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+#include <linux/binfmts.h>
+
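For context on how a header like this is consumed: exactly one compilation unit per subsystem defines CREATE_TRACE_POINTS before including the header (so define_trace.h instantiates the tracepoints), while every other user includes it plainly and calls the generated trace_<event>() stubs. A minimal sketch, assuming the kthread_stop events defined just below; the consumer function here is hypothetical:

	/* hypothetical consumer file -- sketch only, not part of this diff */
	#define CREATE_TRACE_POINTS
	#include <trace/events/sched.h>	/* instantiates the sched tracepoints */

	static void report_kthread_stop(struct task_struct *k, int ret)
	{
		/* each call is a near-free static branch unless the event is enabled */
		trace_sched_kthread_stop(k);
		trace_sched_kthread_stop_ret(ret);
	}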
+/* + * Tracepoint for calling kthread_stop, performed to end a kthread: + */ +TRACE_EVENT(sched_kthread_stop, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + ), + + TP_fast_assign( + memcpy(__entry->comm, t->comm, TASK_COMM_LEN); + __entry->pid = t->pid; + ), + + TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) +); + +/* + * Tracepoint for the return value of the kthread stopping: + */ +TRACE_EVENT(sched_kthread_stop_ret, + + TP_PROTO(int ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("ret=%d", __entry->ret) +); + +/* + * Tracepoint for waking up a task: + */ +DECLARE_EVENT_CLASS(sched_wakeup_template, + + TP_PROTO(struct task_struct *p, int success), + + TP_ARGS(__perf_task(p), success), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( int, success ) + __field( int, target_cpu ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + __entry->success = success; + __entry->target_cpu = task_cpu(p); + ), + + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", + __entry->comm, __entry->pid, __entry->prio, + __entry->success, __entry->target_cpu) +); + +DEFINE_EVENT(sched_wakeup_template, sched_wakeup, + TP_PROTO(struct task_struct *p, int success), + TP_ARGS(p, success)); + +/* + * Tracepoint for waking up a new task: + */ +DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, + TP_PROTO(struct task_struct *p, int success), + TP_ARGS(p, success)); + +#ifdef CREATE_TRACE_POINTS +static inline long __trace_sched_switch_state(struct task_struct *p) +{ + long state = p->state; + +#ifdef CONFIG_PREEMPT +#ifdef CONFIG_SCHED_DEBUG + BUG_ON(p != current); +#endif /* CONFIG_SCHED_DEBUG */ + /* + * For all intents and purposes a preempted task is a running task. + */ + if (preempt_count() & PREEMPT_ACTIVE) + state = TASK_RUNNING | TASK_STATE_MAX; +#endif /* CONFIG_PREEMPT */ + + return state; +} +#endif /* CREATE_TRACE_POINTS */ + +/* + * Tracepoint for task switches, performed by the scheduler: + */ +TRACE_EVENT(sched_switch, + + TP_PROTO(struct task_struct *prev, + struct task_struct *next), + + TP_ARGS(prev, next), + + TP_STRUCT__entry( + __array( char, prev_comm, TASK_COMM_LEN ) + __field( pid_t, prev_pid ) + __field( int, prev_prio ) + __field( long, prev_state ) + __array( char, next_comm, TASK_COMM_LEN ) + __field( pid_t, next_pid ) + __field( int, next_prio ) + ), + + TP_fast_assign( + memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); + __entry->prev_pid = prev->pid; + __entry->prev_prio = prev->prio; + __entry->prev_state = __trace_sched_switch_state(prev); + memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); + __entry->next_pid = next->pid; + __entry->next_prio = next->prio; + ), + + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", + __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, + __entry->prev_state & (TASK_STATE_MAX-1) ? + __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", + { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, + { 16, "Z" }, { 32, "X" }, { 64, "x" }, + { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", + __entry->prev_state & TASK_STATE_MAX ? 
"+" : "", + __entry->next_comm, __entry->next_pid, __entry->next_prio) +); + +/* + * Tracepoint for a task being migrated: + */ +TRACE_EVENT(sched_migrate_task, + + TP_PROTO(struct task_struct *p, int dest_cpu), + + TP_ARGS(p, dest_cpu), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( int, orig_cpu ) + __field( int, dest_cpu ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + __entry->orig_cpu = task_cpu(p); + __entry->dest_cpu = dest_cpu; + ), + + TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", + __entry->comm, __entry->pid, __entry->prio, + __entry->orig_cpu, __entry->dest_cpu) +); + +DECLARE_EVENT_CLASS(sched_process_template, + + TP_PROTO(struct task_struct *p), + + TP_ARGS(p), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + ), + + TP_printk("comm=%s pid=%d prio=%d", + __entry->comm, __entry->pid, __entry->prio) +); + +/* + * Tracepoint for freeing a task: + */ +DEFINE_EVENT(sched_process_template, sched_process_free, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + + +/* + * Tracepoint for a task exiting: + */ +DEFINE_EVENT(sched_process_template, sched_process_exit, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + +/* + * Tracepoint for waiting on task to unschedule: + */ +DEFINE_EVENT(sched_process_template, sched_wait_task, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + +/* + * Tracepoint for a waiting task: + */ +TRACE_EVENT(sched_process_wait, + + TP_PROTO(struct pid *pid), + + TP_ARGS(pid), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + __entry->pid = pid_nr(pid); + __entry->prio = current->prio; + ), + + TP_printk("comm=%s pid=%d prio=%d", + __entry->comm, __entry->pid, __entry->prio) +); + +/* + * Tracepoint for do_fork: + */ +TRACE_EVENT(sched_process_fork, + + TP_PROTO(struct task_struct *parent, struct task_struct *child), + + TP_ARGS(parent, child), + + TP_STRUCT__entry( + __array( char, parent_comm, TASK_COMM_LEN ) + __field( pid_t, parent_pid ) + __array( char, child_comm, TASK_COMM_LEN ) + __field( pid_t, child_pid ) + ), + + TP_fast_assign( + memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN); + __entry->parent_pid = parent->pid; + memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN); + __entry->child_pid = child->pid; + ), + + TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", + __entry->parent_comm, __entry->parent_pid, + __entry->child_comm, __entry->child_pid) +); + +/* + * Tracepoint for exec: + */ +TRACE_EVENT(sched_process_exec, + + TP_PROTO(struct task_struct *p, pid_t old_pid, + struct linux_binprm *bprm), + + TP_ARGS(p, old_pid, bprm), + + TP_STRUCT__entry( + __string( filename, bprm->filename ) + __field( pid_t, pid ) + __field( pid_t, old_pid ) + ), + + TP_fast_assign( + __assign_str(filename, bprm->filename); + __entry->pid = p->pid; + __entry->old_pid = old_pid; + ), + + TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename), + __entry->pid, __entry->old_pid) +); + +/* + * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE + * adding sched_stat support to SCHED_FIFO/RR would be welcome. 
+ */
+DECLARE_EVENT_CLASS(sched_stat_template,
+
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+
+	TP_ARGS(__perf_task(tsk), __perf_count(delay)),
+
+	TP_STRUCT__entry(
+		__array( char, comm, TASK_COMM_LEN )
+		__field( pid_t, pid )
+		__field( u64, delay )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid = tsk->pid;
+		__entry->delay = delay;
+	),
+
+	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
+		__entry->comm, __entry->pid,
+		(unsigned long long)__entry->delay)
+);
+
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+	TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+	TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+	TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting blocked time (time the task is in
+ * uninterruptible sleep).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+	TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting runtime (time the task is executing
+ * on a CPU).
+ */
+DECLARE_EVENT_CLASS(sched_stat_runtime,
+
+	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+
+	TP_ARGS(tsk, __perf_count(runtime), vruntime),
+
+	TP_STRUCT__entry(
+		__array( char, comm, TASK_COMM_LEN )
+		__field( pid_t, pid )
+		__field( u64, runtime )
+		__field( u64, vruntime )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid = tsk->pid;
+		__entry->runtime = runtime;
+		__entry->vruntime = vruntime;
+	),
+
+	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+		__entry->comm, __entry->pid,
+		(unsigned long long)__entry->runtime,
+		(unsigned long long)__entry->vruntime)
+);
+
+DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
+	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+	TP_ARGS(tsk, runtime, vruntime));
+
+/*
+ * Tracepoint for showing priority inheritance modifying a task's
+ * priority.
+ */ +TRACE_EVENT(sched_pi_setprio, + + TP_PROTO(struct task_struct *tsk, int newprio), + + TP_ARGS(tsk, newprio), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, oldprio ) + __field( int, newprio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->oldprio = tsk->prio; + __entry->newprio = newprio; + ), + + TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", + __entry->comm, __entry->pid, + __entry->oldprio, __entry->newprio) +); + +#ifdef CONFIG_DETECT_HUNG_TASK +TRACE_EVENT(sched_process_hang, + TP_PROTO(struct task_struct *tsk), + TP_ARGS(tsk), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + ), + + TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) +); +#endif /* CONFIG_DETECT_HUNG_TASK */ + +DECLARE_EVENT_CLASS(sched_move_task_template, + + TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), + + TP_ARGS(tsk, src_cpu, dst_cpu), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( pid_t, tgid ) + __field( pid_t, ngid ) + __field( int, src_cpu ) + __field( int, src_nid ) + __field( int, dst_cpu ) + __field( int, dst_nid ) + ), + + TP_fast_assign( + __entry->pid = task_pid_nr(tsk); + __entry->tgid = task_tgid_nr(tsk); + __entry->ngid = task_numa_group_id(tsk); + __entry->src_cpu = src_cpu; + __entry->src_nid = cpu_to_node(src_cpu); + __entry->dst_cpu = dst_cpu; + __entry->dst_nid = cpu_to_node(dst_cpu); + ), + + TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d", + __entry->pid, __entry->tgid, __entry->ngid, + __entry->src_cpu, __entry->src_nid, + __entry->dst_cpu, __entry->dst_nid) +); + +/* + * Tracks migration of tasks from one runqueue to another. 
Can be used to + * detect if automatic NUMA balancing is bouncing between nodes + */ +DEFINE_EVENT(sched_move_task_template, sched_move_numa, + TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), + + TP_ARGS(tsk, src_cpu, dst_cpu) +); + +DEFINE_EVENT(sched_move_task_template, sched_stick_numa, + TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), + + TP_ARGS(tsk, src_cpu, dst_cpu) +); + +TRACE_EVENT(sched_swap_numa, + + TP_PROTO(struct task_struct *src_tsk, int src_cpu, + struct task_struct *dst_tsk, int dst_cpu), + + TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu), + + TP_STRUCT__entry( + __field( pid_t, src_pid ) + __field( pid_t, src_tgid ) + __field( pid_t, src_ngid ) + __field( int, src_cpu ) + __field( int, src_nid ) + __field( pid_t, dst_pid ) + __field( pid_t, dst_tgid ) + __field( pid_t, dst_ngid ) + __field( int, dst_cpu ) + __field( int, dst_nid ) + ), + + TP_fast_assign( + __entry->src_pid = task_pid_nr(src_tsk); + __entry->src_tgid = task_tgid_nr(src_tsk); + __entry->src_ngid = task_numa_group_id(src_tsk); + __entry->src_cpu = src_cpu; + __entry->src_nid = cpu_to_node(src_cpu); + __entry->dst_pid = task_pid_nr(dst_tsk); + __entry->dst_tgid = task_tgid_nr(dst_tsk); + __entry->dst_ngid = task_numa_group_id(dst_tsk); + __entry->dst_cpu = dst_cpu; + __entry->dst_nid = cpu_to_node(dst_cpu); + ), + + TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d", + __entry->src_pid, __entry->src_tgid, __entry->src_ngid, + __entry->src_cpu, __entry->src_nid, + __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid, + __entry->dst_cpu, __entry->dst_nid) +); + +/* + * Tracepoint for waking a polling cpu without an IPI. + */ +TRACE_EVENT(sched_wake_idle_without_ipi, + + TP_PROTO(int cpu), + + TP_ARGS(cpu), + + TP_STRUCT__entry( + __field( int, cpu ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + ), + + TP_printk("cpu=%d", __entry->cpu) +); +#endif /* _TRACE_SCHED_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/scsi.h b/kernel/include/trace/events/scsi.h new file mode 100644 index 000000000..079bd10a0 --- /dev/null +++ b/kernel/include/trace/events/scsi.h @@ -0,0 +1,365 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM scsi + +#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SCSI_H + +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +#define scsi_opcode_name(opcode) { opcode, #opcode } +#define show_opcode_name(val) \ + __print_symbolic(val, \ + scsi_opcode_name(TEST_UNIT_READY), \ + scsi_opcode_name(REZERO_UNIT), \ + scsi_opcode_name(REQUEST_SENSE), \ + scsi_opcode_name(FORMAT_UNIT), \ + scsi_opcode_name(READ_BLOCK_LIMITS), \ + scsi_opcode_name(REASSIGN_BLOCKS), \ + scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \ + scsi_opcode_name(READ_6), \ + scsi_opcode_name(WRITE_6), \ + scsi_opcode_name(SEEK_6), \ + scsi_opcode_name(READ_REVERSE), \ + scsi_opcode_name(WRITE_FILEMARKS), \ + scsi_opcode_name(SPACE), \ + scsi_opcode_name(INQUIRY), \ + scsi_opcode_name(RECOVER_BUFFERED_DATA), \ + scsi_opcode_name(MODE_SELECT), \ + scsi_opcode_name(RESERVE), \ + scsi_opcode_name(RELEASE), \ + scsi_opcode_name(COPY), \ + scsi_opcode_name(ERASE), \ + scsi_opcode_name(MODE_SENSE), \ + scsi_opcode_name(START_STOP), \ + scsi_opcode_name(RECEIVE_DIAGNOSTIC), \ + scsi_opcode_name(SEND_DIAGNOSTIC), \ + scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \ + 
scsi_opcode_name(SET_WINDOW), \ + scsi_opcode_name(READ_CAPACITY), \ + scsi_opcode_name(READ_10), \ + scsi_opcode_name(WRITE_10), \ + scsi_opcode_name(SEEK_10), \ + scsi_opcode_name(POSITION_TO_ELEMENT), \ + scsi_opcode_name(WRITE_VERIFY), \ + scsi_opcode_name(VERIFY), \ + scsi_opcode_name(SEARCH_HIGH), \ + scsi_opcode_name(SEARCH_EQUAL), \ + scsi_opcode_name(SEARCH_LOW), \ + scsi_opcode_name(SET_LIMITS), \ + scsi_opcode_name(PRE_FETCH), \ + scsi_opcode_name(READ_POSITION), \ + scsi_opcode_name(SYNCHRONIZE_CACHE), \ + scsi_opcode_name(LOCK_UNLOCK_CACHE), \ + scsi_opcode_name(READ_DEFECT_DATA), \ + scsi_opcode_name(MEDIUM_SCAN), \ + scsi_opcode_name(COMPARE), \ + scsi_opcode_name(COPY_VERIFY), \ + scsi_opcode_name(WRITE_BUFFER), \ + scsi_opcode_name(READ_BUFFER), \ + scsi_opcode_name(UPDATE_BLOCK), \ + scsi_opcode_name(READ_LONG), \ + scsi_opcode_name(WRITE_LONG), \ + scsi_opcode_name(CHANGE_DEFINITION), \ + scsi_opcode_name(WRITE_SAME), \ + scsi_opcode_name(UNMAP), \ + scsi_opcode_name(READ_TOC), \ + scsi_opcode_name(LOG_SELECT), \ + scsi_opcode_name(LOG_SENSE), \ + scsi_opcode_name(XDWRITEREAD_10), \ + scsi_opcode_name(MODE_SELECT_10), \ + scsi_opcode_name(RESERVE_10), \ + scsi_opcode_name(RELEASE_10), \ + scsi_opcode_name(MODE_SENSE_10), \ + scsi_opcode_name(PERSISTENT_RESERVE_IN), \ + scsi_opcode_name(PERSISTENT_RESERVE_OUT), \ + scsi_opcode_name(VARIABLE_LENGTH_CMD), \ + scsi_opcode_name(REPORT_LUNS), \ + scsi_opcode_name(MAINTENANCE_IN), \ + scsi_opcode_name(MAINTENANCE_OUT), \ + scsi_opcode_name(MOVE_MEDIUM), \ + scsi_opcode_name(EXCHANGE_MEDIUM), \ + scsi_opcode_name(READ_12), \ + scsi_opcode_name(WRITE_12), \ + scsi_opcode_name(WRITE_VERIFY_12), \ + scsi_opcode_name(SEARCH_HIGH_12), \ + scsi_opcode_name(SEARCH_EQUAL_12), \ + scsi_opcode_name(SEARCH_LOW_12), \ + scsi_opcode_name(READ_ELEMENT_STATUS), \ + scsi_opcode_name(SEND_VOLUME_TAG), \ + scsi_opcode_name(WRITE_LONG_2), \ + scsi_opcode_name(READ_16), \ + scsi_opcode_name(WRITE_16), \ + scsi_opcode_name(VERIFY_16), \ + scsi_opcode_name(WRITE_SAME_16), \ + scsi_opcode_name(SERVICE_ACTION_IN_16), \ + scsi_opcode_name(SAI_READ_CAPACITY_16), \ + scsi_opcode_name(SAI_GET_LBA_STATUS), \ + scsi_opcode_name(MI_REPORT_TARGET_PGS), \ + scsi_opcode_name(MO_SET_TARGET_PGS), \ + scsi_opcode_name(READ_32), \ + scsi_opcode_name(WRITE_32), \ + scsi_opcode_name(WRITE_SAME_32), \ + scsi_opcode_name(ATA_16), \ + scsi_opcode_name(ATA_12)) + +#define scsi_hostbyte_name(result) { result, #result } +#define show_hostbyte_name(val) \ + __print_symbolic(val, \ + scsi_hostbyte_name(DID_OK), \ + scsi_hostbyte_name(DID_NO_CONNECT), \ + scsi_hostbyte_name(DID_BUS_BUSY), \ + scsi_hostbyte_name(DID_TIME_OUT), \ + scsi_hostbyte_name(DID_BAD_TARGET), \ + scsi_hostbyte_name(DID_ABORT), \ + scsi_hostbyte_name(DID_PARITY), \ + scsi_hostbyte_name(DID_ERROR), \ + scsi_hostbyte_name(DID_RESET), \ + scsi_hostbyte_name(DID_BAD_INTR), \ + scsi_hostbyte_name(DID_PASSTHROUGH), \ + scsi_hostbyte_name(DID_SOFT_ERROR), \ + scsi_hostbyte_name(DID_IMM_RETRY), \ + scsi_hostbyte_name(DID_REQUEUE), \ + scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \ + scsi_hostbyte_name(DID_TRANSPORT_FAILFAST)) + +#define scsi_driverbyte_name(result) { result, #result } +#define show_driverbyte_name(val) \ + __print_symbolic(val, \ + scsi_driverbyte_name(DRIVER_OK), \ + scsi_driverbyte_name(DRIVER_BUSY), \ + scsi_driverbyte_name(DRIVER_SOFT), \ + scsi_driverbyte_name(DRIVER_MEDIA), \ + scsi_driverbyte_name(DRIVER_ERROR), \ + scsi_driverbyte_name(DRIVER_INVALID), \ + 
scsi_driverbyte_name(DRIVER_TIMEOUT), \ + scsi_driverbyte_name(DRIVER_HARD), \ + scsi_driverbyte_name(DRIVER_SENSE)) + +#define scsi_msgbyte_name(result) { result, #result } +#define show_msgbyte_name(val) \ + __print_symbolic(val, \ + scsi_msgbyte_name(COMMAND_COMPLETE), \ + scsi_msgbyte_name(EXTENDED_MESSAGE), \ + scsi_msgbyte_name(SAVE_POINTERS), \ + scsi_msgbyte_name(RESTORE_POINTERS), \ + scsi_msgbyte_name(DISCONNECT), \ + scsi_msgbyte_name(INITIATOR_ERROR), \ + scsi_msgbyte_name(ABORT_TASK_SET), \ + scsi_msgbyte_name(MESSAGE_REJECT), \ + scsi_msgbyte_name(NOP), \ + scsi_msgbyte_name(MSG_PARITY_ERROR), \ + scsi_msgbyte_name(LINKED_CMD_COMPLETE), \ + scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \ + scsi_msgbyte_name(TARGET_RESET), \ + scsi_msgbyte_name(ABORT_TASK), \ + scsi_msgbyte_name(CLEAR_TASK_SET), \ + scsi_msgbyte_name(INITIATE_RECOVERY), \ + scsi_msgbyte_name(RELEASE_RECOVERY), \ + scsi_msgbyte_name(CLEAR_ACA), \ + scsi_msgbyte_name(LOGICAL_UNIT_RESET), \ + scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \ + scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \ + scsi_msgbyte_name(ORDERED_QUEUE_TAG), \ + scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \ + scsi_msgbyte_name(ACA), \ + scsi_msgbyte_name(QAS_REQUEST), \ + scsi_msgbyte_name(BUS_DEVICE_RESET), \ + scsi_msgbyte_name(ABORT)) + +#define scsi_statusbyte_name(result) { result, #result } +#define show_statusbyte_name(val) \ + __print_symbolic(val, \ + scsi_statusbyte_name(SAM_STAT_GOOD), \ + scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \ + scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \ + scsi_statusbyte_name(SAM_STAT_BUSY), \ + scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \ + scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \ + scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \ + scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \ + scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \ + scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \ + scsi_statusbyte_name(SAM_STAT_TASK_ABORTED)) + +#define scsi_prot_op_name(result) { result, #result } +#define show_prot_op_name(val) \ + __print_symbolic(val, \ + scsi_prot_op_name(SCSI_PROT_NORMAL), \ + scsi_prot_op_name(SCSI_PROT_READ_INSERT), \ + scsi_prot_op_name(SCSI_PROT_WRITE_STRIP), \ + scsi_prot_op_name(SCSI_PROT_READ_STRIP), \ + scsi_prot_op_name(SCSI_PROT_WRITE_INSERT), \ + scsi_prot_op_name(SCSI_PROT_READ_PASS), \ + scsi_prot_op_name(SCSI_PROT_WRITE_PASS)) + +const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int); +#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len) + +TRACE_EVENT(scsi_dispatch_cmd_start, + + TP_PROTO(struct scsi_cmnd *cmd), + + TP_ARGS(cmd), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( unsigned int, opcode ) + __field( unsigned int, cmd_len ) + __field( unsigned int, data_sglen ) + __field( unsigned int, prot_sglen ) + __field( unsigned char, prot_op ) + __dynamic_array(unsigned char, cmnd, cmd->cmd_len) + ), + + TP_fast_assign( + __entry->host_no = cmd->device->host->host_no; + __entry->channel = cmd->device->channel; + __entry->id = cmd->device->id; + __entry->lun = cmd->device->lun; + __entry->opcode = cmd->cmnd[0]; + __entry->cmd_len = cmd->cmd_len; + __entry->data_sglen = scsi_sg_count(cmd); + __entry->prot_sglen = scsi_prot_sg_count(cmd); + __entry->prot_op = scsi_get_prot_op(cmd); + memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + ), + + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ + " 
prot_op=%s cmnd=(%s %s raw=%s)", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->data_sglen, __entry->prot_sglen, + show_prot_op_name(__entry->prot_op), + show_opcode_name(__entry->opcode), + __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), + __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)) +); + +TRACE_EVENT(scsi_dispatch_cmd_error, + + TP_PROTO(struct scsi_cmnd *cmd, int rtn), + + TP_ARGS(cmd, rtn), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( int, rtn ) + __field( unsigned int, opcode ) + __field( unsigned int, cmd_len ) + __field( unsigned int, data_sglen ) + __field( unsigned int, prot_sglen ) + __field( unsigned char, prot_op ) + __dynamic_array(unsigned char, cmnd, cmd->cmd_len) + ), + + TP_fast_assign( + __entry->host_no = cmd->device->host->host_no; + __entry->channel = cmd->device->channel; + __entry->id = cmd->device->id; + __entry->lun = cmd->device->lun; + __entry->rtn = rtn; + __entry->opcode = cmd->cmnd[0]; + __entry->cmd_len = cmd->cmd_len; + __entry->data_sglen = scsi_sg_count(cmd); + __entry->prot_sglen = scsi_prot_sg_count(cmd); + __entry->prot_op = scsi_get_prot_op(cmd); + memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + ), + + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ + " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->data_sglen, __entry->prot_sglen, + show_prot_op_name(__entry->prot_op), + show_opcode_name(__entry->opcode), + __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), + __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), + __entry->rtn) +); + +DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, + + TP_PROTO(struct scsi_cmnd *cmd), + + TP_ARGS(cmd), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( int, result ) + __field( unsigned int, opcode ) + __field( unsigned int, cmd_len ) + __field( unsigned int, data_sglen ) + __field( unsigned int, prot_sglen ) + __field( unsigned char, prot_op ) + __dynamic_array(unsigned char, cmnd, cmd->cmd_len) + ), + + TP_fast_assign( + __entry->host_no = cmd->device->host->host_no; + __entry->channel = cmd->device->channel; + __entry->id = cmd->device->id; + __entry->lun = cmd->device->lun; + __entry->result = cmd->result; + __entry->opcode = cmd->cmnd[0]; + __entry->cmd_len = cmd->cmd_len; + __entry->data_sglen = scsi_sg_count(cmd); + __entry->prot_sglen = scsi_prot_sg_count(cmd); + __entry->prot_op = scsi_get_prot_op(cmd); + memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + ), + + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \ + "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \ + "%s host=%s message=%s status=%s)", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->data_sglen, __entry->prot_sglen, + show_prot_op_name(__entry->prot_op), + show_opcode_name(__entry->opcode), + __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), + __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), + show_driverbyte_name(((__entry->result) >> 24) & 0xff), + show_hostbyte_name(((__entry->result) >> 16) & 0xff), + show_msgbyte_name(((__entry->result) >> 8) & 0xff), + show_statusbyte_name(__entry->result & 0xff)) +); + +DEFINE_EVENT(scsi_cmd_done_timeout_template, 
scsi_dispatch_cmd_done,
+	TP_PROTO(struct scsi_cmnd *cmd),
+	TP_ARGS(cmd));
+
+DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
+	TP_PROTO(struct scsi_cmnd *cmd),
+	TP_ARGS(cmd));
+
+TRACE_EVENT(scsi_eh_wakeup,
+
+	TP_PROTO(struct Scsi_Host *shost),
+
+	TP_ARGS(shost),
+
+	TP_STRUCT__entry(
+		__field( unsigned int, host_no )
+	),
+
+	TP_fast_assign(
+		__entry->host_no = shost->host_no;
+	),
+
+	TP_printk("host_no=%u", __entry->host_no)
+);
+
+#endif /* _TRACE_SCSI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/signal.h b/kernel/include/trace/events/signal.h
new file mode 100644
index 000000000..39a8a430d
--- /dev/null
+++ b/kernel/include/trace/events/signal.h
@@ -0,0 +1,125 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM signal
+
+#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SIGNAL_H
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+#define TP_STORE_SIGINFO(__entry, info)			\
+	do {						\
+		if (info == SEND_SIG_NOINFO ||		\
+		    info == SEND_SIG_FORCED) {		\
+			__entry->errno = 0;		\
+			__entry->code = SI_USER;	\
+		} else if (info == SEND_SIG_PRIV) {	\
+			__entry->errno = 0;		\
+			__entry->code = SI_KERNEL;	\
+		} else {				\
+			__entry->errno = info->si_errno;	\
+			__entry->code = info->si_code;	\
+		}					\
+	} while (0)
+
+#ifndef TRACE_HEADER_MULTI_READ
+enum {
+	TRACE_SIGNAL_DELIVERED,
+	TRACE_SIGNAL_IGNORED,
+	TRACE_SIGNAL_ALREADY_PENDING,
+	TRACE_SIGNAL_OVERFLOW_FAIL,
+	TRACE_SIGNAL_LOSE_INFO,
+};
+#endif
+
+/**
+ * signal_generate - called when a signal is generated
+ * @sig: signal number
+ * @info: pointer to struct siginfo
+ * @task: pointer to struct task_struct
+ * @group: shared or private
+ * @result: TRACE_SIGNAL_*
+ *
+ * Current process sends a 'sig' signal to 'task' process with
+ * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
+ * 'info' is not a pointer and you can't access its fields. Instead,
+ * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
+ * means that si_code is SI_KERNEL.
+ */
+TRACE_EVENT(signal_generate,
+
+	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+			int group, int result),
+
+	TP_ARGS(sig, info, task, group, result),
+
+	TP_STRUCT__entry(
+		__field( int, sig )
+		__field( int, errno )
+		__field( int, code )
+		__array( char, comm, TASK_COMM_LEN )
+		__field( pid_t, pid )
+		__field( int, group )
+		__field( int, result )
+	),
+
+	TP_fast_assign(
+		__entry->sig = sig;
+		TP_STORE_SIGINFO(__entry, info);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task->pid;
+		__entry->group = group;
+		__entry->result = result;
+	),
+
+	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
+		__entry->sig, __entry->errno, __entry->code,
+		__entry->comm, __entry->pid, __entry->group,
+		__entry->result)
+);
+
+/**
+ * signal_deliver - called when a signal is delivered
+ * @sig: signal number
+ * @info: pointer to struct siginfo
+ * @ka: pointer to struct k_sigaction
+ *
+ * A 'sig' signal is delivered to current process with 'info' siginfo,
+ * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
+ * SIG_DFL.
+ * Note that some signals reported by the signal_generate tracepoint can be
+ * lost, ignored or modified (by debugger) before hitting this tracepoint.
+ * This means that this tracepoint shows which signals are actually
+ * delivered, but that matching generated signals against delivered
+ * signals may not always be possible.
+ */
+TRACE_EVENT(signal_deliver,
+
+	TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
+
+	TP_ARGS(sig, info, ka),
+
+	TP_STRUCT__entry(
+		__field( int, sig )
+		__field( int, errno )
+		__field( int, code )
+		__field( unsigned long, sa_handler )
+		__field( unsigned long, sa_flags )
+	),
+
+	TP_fast_assign(
+		__entry->sig = sig;
+		TP_STORE_SIGINFO(__entry, info);
+		__entry->sa_handler = (unsigned long)ka->sa.sa_handler;
+		__entry->sa_flags = ka->sa.sa_flags;
+	),
+
+	TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
+		__entry->sig, __entry->errno, __entry->code,
+		__entry->sa_handler, __entry->sa_flags)
+);
+
+#endif /* _TRACE_SIGNAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/skb.h b/kernel/include/trace/events/skb.h
new file mode 100644
index 000000000..0c68ae22d
--- /dev/null
+++ b/kernel/include/trace/events/skb.h
@@ -0,0 +1,75 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM skb
+
+#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SKB_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for freeing an sk_buff:
+ */
+TRACE_EVENT(kfree_skb,
+
+	TP_PROTO(struct sk_buff *skb, void *location),
+
+	TP_ARGS(skb, location),
+
+	TP_STRUCT__entry(
+		__field( void *, skbaddr )
+		__field( void *, location )
+		__field( unsigned short, protocol )
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->location = location;
+		__entry->protocol = ntohs(skb->protocol);
+	),
+
+	TP_printk("skbaddr=%p protocol=%u location=%p",
+		__entry->skbaddr, __entry->protocol, __entry->location)
+);
+
+TRACE_EVENT(consume_skb,
+
+	TP_PROTO(struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__field( void *, skbaddr )
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+	),
+
+	TP_printk("skbaddr=%p", __entry->skbaddr)
+);
+
+TRACE_EVENT(skb_copy_datagram_iovec,
+
+	TP_PROTO(const struct sk_buff *skb, int len),
+
+	TP_ARGS(skb, len),
+
+	TP_STRUCT__entry(
+		__field( const void *, skbaddr )
+		__field( int, len )
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = len;
+	),
+
+	TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
+);
+
+#endif /* _TRACE_SKB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/sock.h b/kernel/include/trace/events/sock.h
new file mode 100644
index 000000000..779abb91d
--- /dev/null
+++ b/kernel/include/trace/events/sock.h
@@ -0,0 +1,68 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sock
+
+#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOCK_H
+
+#include <net/sock.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sock_rcvqueue_full,
+
+	TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+	TP_ARGS(sk, skb),
+
+	TP_STRUCT__entry(
+		__field(int, rmem_alloc)
+		__field(unsigned int, truesize)
+		__field(int, sk_rcvbuf)
+	),
+
+	TP_fast_assign(
+		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+		__entry->truesize = skb->truesize;
+		__entry->sk_rcvbuf = sk->sk_rcvbuf;
+	),
+
+	TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
+		__entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
+);
+
+TRACE_EVENT(sock_exceed_buf_limit,
+
+	TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
+
+	TP_ARGS(sk, prot, allocated),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(long *, sysctl_mem)
+		__field(long, allocated)
+		__field(int,
sysctl_rmem) + __field(int, rmem_alloc) + ), + + TP_fast_assign( + strncpy(__entry->name, prot->name, 32); + __entry->sysctl_mem = prot->sysctl_mem; + __entry->allocated = allocated; + __entry->sysctl_rmem = prot->sysctl_rmem[0]; + __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); + ), + + TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " + "sysctl_rmem=%d rmem_alloc=%d", + __entry->name, + __entry->sysctl_mem[0], + __entry->sysctl_mem[1], + __entry->sysctl_mem[2], + __entry->allocated, + __entry->sysctl_rmem, + __entry->rmem_alloc) +); + +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/spi.h b/kernel/include/trace/events/spi.h new file mode 100644 index 000000000..7e02c983b --- /dev/null +++ b/kernel/include/trace/events/spi.h @@ -0,0 +1,156 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM spi + +#if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SPI_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(spi_master, + + TP_PROTO(struct spi_master *master), + + TP_ARGS(master), + + TP_STRUCT__entry( + __field( int, bus_num ) + ), + + TP_fast_assign( + __entry->bus_num = master->bus_num; + ), + + TP_printk("spi%d", (int)__entry->bus_num) + +); + +DEFINE_EVENT(spi_master, spi_master_idle, + + TP_PROTO(struct spi_master *master), + + TP_ARGS(master) + +); + +DEFINE_EVENT(spi_master, spi_master_busy, + + TP_PROTO(struct spi_master *master), + + TP_ARGS(master) + +); + +DECLARE_EVENT_CLASS(spi_message, + + TP_PROTO(struct spi_message *msg), + + TP_ARGS(msg), + + TP_STRUCT__entry( + __field( int, bus_num ) + __field( int, chip_select ) + __field( struct spi_message *, msg ) + ), + + TP_fast_assign( + __entry->bus_num = msg->spi->master->bus_num; + __entry->chip_select = msg->spi->chip_select; + __entry->msg = msg; + ), + + TP_printk("spi%d.%d %p", (int)__entry->bus_num, + (int)__entry->chip_select, + (struct spi_message *)__entry->msg) +); + +DEFINE_EVENT(spi_message, spi_message_submit, + + TP_PROTO(struct spi_message *msg), + + TP_ARGS(msg) + +); + +DEFINE_EVENT(spi_message, spi_message_start, + + TP_PROTO(struct spi_message *msg), + + TP_ARGS(msg) + +); + +TRACE_EVENT(spi_message_done, + + TP_PROTO(struct spi_message *msg), + + TP_ARGS(msg), + + TP_STRUCT__entry( + __field( int, bus_num ) + __field( int, chip_select ) + __field( struct spi_message *, msg ) + __field( unsigned, frame ) + __field( unsigned, actual ) + ), + + TP_fast_assign( + __entry->bus_num = msg->spi->master->bus_num; + __entry->chip_select = msg->spi->chip_select; + __entry->msg = msg; + __entry->frame = msg->frame_length; + __entry->actual = msg->actual_length; + ), + + TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num, + (int)__entry->chip_select, + (struct spi_message *)__entry->msg, + (unsigned)__entry->actual, (unsigned)__entry->frame) +); + +DECLARE_EVENT_CLASS(spi_transfer, + + TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), + + TP_ARGS(msg, xfer), + + TP_STRUCT__entry( + __field( int, bus_num ) + __field( int, chip_select ) + __field( struct spi_transfer *, xfer ) + __field( int, len ) + ), + + TP_fast_assign( + __entry->bus_num = msg->spi->master->bus_num; + __entry->chip_select = msg->spi->chip_select; + __entry->xfer = xfer; + __entry->len = xfer->len; + ), + + TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num, + (int)__entry->chip_select, + (struct spi_message *)__entry->xfer, + (int)__entry->len) +); + 
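The two DEFINE_EVENT() lines that follow stamp out spi_transfer_start and spi_transfer_stop from the shared spi_transfer class above, so both events reuse one record layout and format string. A hedged sketch of how a controller driver might bracket a transfer with the pair (hypothetical function, illustration only):

	/* hypothetical SPI controller path -- sketch only, not part of this diff */
	static int example_push_transfer(struct spi_message *msg,
					 struct spi_transfer *xfer)
	{
		trace_spi_transfer_start(msg, xfer);	/* defined just below */
		/* ... clock out xfer->len bytes to the bus ... */
		trace_spi_transfer_stop(msg, xfer);
		return 0;
	}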
+DEFINE_EVENT(spi_transfer, spi_transfer_start,
+
+	TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
+
+	TP_ARGS(msg, xfer)
+
+);
+
+DEFINE_EVENT(spi_transfer, spi_transfer_stop,
+
+	TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
+
+	TP_ARGS(msg, xfer)
+
+);
+
+#endif /* _TRACE_SPI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/sunrpc.h b/kernel/include/trace/events/sunrpc.h
new file mode 100644
index 000000000..fd1a02cb3
--- /dev/null
+++ b/kernel/include/trace/events/sunrpc.h
@@ -0,0 +1,605 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunrpc
+
+#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNRPC_H
+
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/xprtsock.h>
+#include <linux/sunrpc/svc_xprt.h>
+#include <net/tcp_states.h>
+#include <linux/net.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rpc_task_status,
+
+	TP_PROTO(struct rpc_task *task),
+
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		__entry->task_id = task->tk_pid;
+		__entry->client_id = task->tk_client->cl_clid;
+		__entry->status = task->tk_status;
+	),
+
+	TP_printk("task:%u@%u, status %d",
+		__entry->task_id, __entry->client_id,
+		__entry->status)
+);
+
+DEFINE_EVENT(rpc_task_status, rpc_call_status,
+	TP_PROTO(struct rpc_task *task),
+
+	TP_ARGS(task)
+);
+
+DEFINE_EVENT(rpc_task_status, rpc_bind_status,
+	TP_PROTO(struct rpc_task *task),
+
+	TP_ARGS(task)
+);
+
+TRACE_EVENT(rpc_connect_status,
+	TP_PROTO(struct rpc_task *task, int status),
+
+	TP_ARGS(task, status),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		__entry->task_id = task->tk_pid;
+		__entry->client_id = task->tk_client->cl_clid;
+		__entry->status = status;
+	),
+
+	TP_printk("task:%u@%u, status %d",
+		__entry->task_id, __entry->client_id,
+		__entry->status)
+);
+
+DECLARE_EVENT_CLASS(rpc_task_running,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(const void *, action)
+		__field(unsigned long, runstate)
+		__field(int, status)
+		__field(unsigned short, flags)
+		),
+
+	TP_fast_assign(
+		__entry->client_id = clnt ?
+		__entry->task_id = task->tk_pid;
+		__entry->action = action;
+		__entry->runstate = task->tk_runstate;
+		__entry->status = task->tk_status;
+		__entry->flags = task->tk_flags;
+	),
+
+	TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
+		__entry->task_id, __entry->client_id,
+		__entry->flags,
+		__entry->runstate,
+		__entry->status,
+		__entry->action
+		)
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_begin,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action)
+
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action)
+
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_complete,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action)
+
+);
+
+DECLARE_EVENT_CLASS(rpc_task_queued,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+	TP_ARGS(clnt, task, q),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(unsigned long, timeout)
+		__field(unsigned long, runstate)
+		__field(int, status)
+		__field(unsigned short, flags)
+		__string(q_name, rpc_qname(q))
+	),
+
+	TP_fast_assign(
+		__entry->client_id = clnt->cl_clid;
+		__entry->task_id = task->tk_pid;
+		__entry->timeout = task->tk_timeout;
+		__entry->runstate = task->tk_runstate;
+		__entry->status = task->tk_status;
+		__entry->flags = task->tk_flags;
+		__assign_str(q_name, rpc_qname(q));
+	),
+
+	TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+		__entry->task_id, __entry->client_id,
+		__entry->flags,
+		__entry->runstate,
+		__entry->status,
+		__entry->timeout,
+		__get_str(q_name)
+		)
+);
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+	TP_ARGS(clnt, task, q)
+
+);
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+	TP_ARGS(clnt, task, q)
+
+);
+
+/*
+ * First define the enums in the below macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b)	TRACE_DEFINE_ENUM(a);
+#define EMe(a, b)	TRACE_DEFINE_ENUM(a);
+
+#define RPC_SHOW_SOCKET				\
+	EM( SS_FREE, "FREE" )			\
+	EM( SS_UNCONNECTED, "UNCONNECTED" )	\
+	EM( SS_CONNECTING, "CONNECTING" )	\
+	EM( SS_CONNECTED, "CONNECTED" )		\
+	EMe(SS_DISCONNECTING, "DISCONNECTING" )
+
+#define rpc_show_socket_state(state) \
+	__print_symbolic(state, RPC_SHOW_SOCKET)
+
+RPC_SHOW_SOCKET
+
+#define RPC_SHOW_SOCK				\
+	EM( TCP_ESTABLISHED, "ESTABLISHED" )	\
+	EM( TCP_SYN_SENT, "SYN_SENT" )		\
+	EM( TCP_SYN_RECV, "SYN_RECV" )		\
+	EM( TCP_FIN_WAIT1, "FIN_WAIT1" )	\
+	EM( TCP_FIN_WAIT2, "FIN_WAIT2" )	\
+	EM( TCP_TIME_WAIT, "TIME_WAIT" )	\
+	EM( TCP_CLOSE, "CLOSE" )		\
+	EM( TCP_CLOSE_WAIT, "CLOSE_WAIT" )	\
+	EM( TCP_LAST_ACK, "LAST_ACK" )		\
+	EM( TCP_LISTEN, "LISTEN" )		\
+	EMe( TCP_CLOSING, "CLOSING" )
+
+#define rpc_show_sock_state(state) \
+	__print_symbolic(state, RPC_SHOW_SOCK)
+
+RPC_SHOW_SOCK
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +DECLARE_EVENT_CLASS(xs_socket_event, + + TP_PROTO( + struct rpc_xprt *xprt, + struct socket *socket + ), + + TP_ARGS(xprt, socket), + + TP_STRUCT__entry( + __field(unsigned int, socket_state) + __field(unsigned int, sock_state) + __field(unsigned long long, ino) + __string(dstaddr, + xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(dstport, + xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + struct inode *inode = SOCK_INODE(socket); + __entry->socket_state = socket->state; + __entry->sock_state = socket->sk->sk_state; + __entry->ino = (unsigned long long)inode->i_ino; + __assign_str(dstaddr, + xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(dstport, + xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk( + "socket:[%llu] dstaddr=%s/%s " + "state=%u (%s) sk_state=%u (%s)", + __entry->ino, __get_str(dstaddr), __get_str(dstport), + __entry->socket_state, + rpc_show_socket_state(__entry->socket_state), + __entry->sock_state, + rpc_show_sock_state(__entry->sock_state) + ) +); +#define DEFINE_RPC_SOCKET_EVENT(name) \ + DEFINE_EVENT(xs_socket_event, name, \ + TP_PROTO( \ + struct rpc_xprt *xprt, \ + struct socket *socket \ + ), \ + TP_ARGS(xprt, socket)) + +DECLARE_EVENT_CLASS(xs_socket_event_done, + + TP_PROTO( + struct rpc_xprt *xprt, + struct socket *socket, + int error + ), + + TP_ARGS(xprt, socket, error), + + TP_STRUCT__entry( + __field(int, error) + __field(unsigned int, socket_state) + __field(unsigned int, sock_state) + __field(unsigned long long, ino) + __string(dstaddr, + xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(dstport, + xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + struct inode *inode = SOCK_INODE(socket); + __entry->socket_state = socket->state; + __entry->sock_state = socket->sk->sk_state; + __entry->ino = (unsigned long long)inode->i_ino; + __entry->error = error; + __assign_str(dstaddr, + xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(dstport, + xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk( + "error=%d socket:[%llu] dstaddr=%s/%s " + "state=%u (%s) sk_state=%u (%s)", + __entry->error, + __entry->ino, __get_str(dstaddr), __get_str(dstport), + __entry->socket_state, + rpc_show_socket_state(__entry->socket_state), + __entry->sock_state, + rpc_show_sock_state(__entry->sock_state) + ) +); +#define DEFINE_RPC_SOCKET_EVENT_DONE(name) \ + DEFINE_EVENT(xs_socket_event_done, name, \ + TP_PROTO( \ + struct rpc_xprt *xprt, \ + struct socket *socket, \ + int error \ + ), \ + TP_ARGS(xprt, socket, error)) + +DEFINE_RPC_SOCKET_EVENT(rpc_socket_state_change); +DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_connect); +DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_error); +DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); +DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); +DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); + +DECLARE_EVENT_CLASS(rpc_xprt_event, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + + TP_ARGS(xprt, xid, status), + + TP_STRUCT__entry( + __field(__be32, xid) + __field(int, status) + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __entry->xid = xid; + __entry->status = status; + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s xid=0x%x status=%d", __get_str(addr), + __get_str(port), 
be32_to_cpu(__entry->xid), + __entry->status) +); + +DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +DEFINE_EVENT(rpc_xprt_event, xprt_transmit, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +TRACE_EVENT(xs_tcp_data_ready, + TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total), + + TP_ARGS(xprt, err, total), + + TP_STRUCT__entry( + __field(int, err) + __field(unsigned int, total) + __string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] : + "(null)") + __string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] : + "(null)") + ), + + TP_fast_assign( + __entry->err = err; + __entry->total = total; + __assign_str(addr, xprt ? + xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)"); + __assign_str(port, xprt ? + xprt->address_strings[RPC_DISPLAY_PORT] : "(null)"); + ), + + TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr), + __get_str(port), __entry->err, __entry->total) +); + +#define rpc_show_sock_xprt_flags(flags) \ + __print_flags(flags, "|", \ + { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \ + { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \ + { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \ + { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \ + { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \ + { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \ + { TCP_RPC_REPLY, "TCP_RPC_REPLY" }) + +TRACE_EVENT(xs_tcp_data_recv, + TP_PROTO(struct sock_xprt *xs), + + TP_ARGS(xs), + + TP_STRUCT__entry( + __string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]) + __string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]) + __field(__be32, xid) + __field(unsigned long, flags) + __field(unsigned long, copied) + __field(unsigned int, reclen) + __field(unsigned long, offset) + ), + + TP_fast_assign( + __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]); + __entry->xid = xs->tcp_xid; + __entry->flags = xs->tcp_flags; + __entry->copied = xs->tcp_copied; + __entry->reclen = xs->tcp_reclen; + __entry->offset = xs->tcp_offset; + ), + + TP_printk("peer=[%s]:%s xid=0x%x flags=%s copied=%lu reclen=%u offset=%lu", + __get_str(addr), __get_str(port), be32_to_cpu(__entry->xid), + rpc_show_sock_xprt_flags(__entry->flags), + __entry->copied, __entry->reclen, __entry->offset) +); + +#define show_rqstp_flags(flags) \ + __print_flags(flags, "|", \ + { (1UL << RQ_SECURE), "RQ_SECURE"}, \ + { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \ + { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \ + { (1UL << RQ_DROPME), "RQ_DROPME"}, \ + { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \ + { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \ + { (1UL << RQ_BUSY), "RQ_BUSY"}) + +TRACE_EVENT(svc_recv, + TP_PROTO(struct svc_rqst *rqst, int status), + + TP_ARGS(rqst, status), + + TP_STRUCT__entry( + __field(struct sockaddr *, addr) + __field(__be32, xid) + __field(int, status) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->addr = (struct sockaddr *)&rqst->rq_addr; + __entry->xid = status > 0 ? 
rqst->rq_xid : 0; + __entry->status = status; + __entry->flags = rqst->rq_flags; + ), + + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr, + be32_to_cpu(__entry->xid), __entry->status, + show_rqstp_flags(__entry->flags)) +); + +DECLARE_EVENT_CLASS(svc_rqst_status, + + TP_PROTO(struct svc_rqst *rqst, int status), + + TP_ARGS(rqst, status), + + TP_STRUCT__entry( + __field(struct sockaddr *, addr) + __field(__be32, xid) + __field(int, dropme) + __field(int, status) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->addr = (struct sockaddr *)&rqst->rq_addr; + __entry->xid = rqst->rq_xid; + __entry->status = status; + __entry->flags = rqst->rq_flags; + ), + + TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s", + __entry->addr, be32_to_cpu(__entry->xid), + __entry->status, show_rqstp_flags(__entry->flags)) +); + +DEFINE_EVENT(svc_rqst_status, svc_process, + TP_PROTO(struct svc_rqst *rqst, int status), + TP_ARGS(rqst, status)); + +DEFINE_EVENT(svc_rqst_status, svc_send, + TP_PROTO(struct svc_rqst *rqst, int status), + TP_ARGS(rqst, status)); + +#define show_svc_xprt_flags(flags) \ + __print_flags(flags, "|", \ + { (1UL << XPT_BUSY), "XPT_BUSY"}, \ + { (1UL << XPT_CONN), "XPT_CONN"}, \ + { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \ + { (1UL << XPT_DATA), "XPT_DATA"}, \ + { (1UL << XPT_TEMP), "XPT_TEMP"}, \ + { (1UL << XPT_DEAD), "XPT_DEAD"}, \ + { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \ + { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \ + { (1UL << XPT_OLD), "XPT_OLD"}, \ + { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \ + { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \ + { (1UL << XPT_LOCAL), "XPT_LOCAL"}) + +TRACE_EVENT(svc_xprt_do_enqueue, + TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst), + + TP_ARGS(xprt, rqst), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(struct svc_rqst *, rqst) + ), + + TP_fast_assign( + __entry->xprt = xprt; + __entry->rqst = rqst; + ), + + TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, + (struct sockaddr *)&__entry->xprt->xpt_remote, + __entry->rqst ? __entry->rqst->rq_task->pid : 0, + show_svc_xprt_flags(__entry->xprt->xpt_flags)) +); + +TRACE_EVENT(svc_xprt_dequeue, + TP_PROTO(struct svc_xprt *xprt), + + TP_ARGS(xprt), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field_struct(struct sockaddr_storage, ss) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->xprt = xprt, + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); + __entry->flags = xprt ? 
xprt->xpt_flags : 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt, + (struct sockaddr *)&__entry->ss, + show_svc_xprt_flags(__entry->flags)) +); + +TRACE_EVENT(svc_wake_up, + TP_PROTO(int pid), + + TP_ARGS(pid), + + TP_STRUCT__entry( + __field(int, pid) + ), + + TP_fast_assign( + __entry->pid = pid; + ), + + TP_printk("pid=%d", __entry->pid) +); + +TRACE_EVENT(svc_handle_xprt, + TP_PROTO(struct svc_xprt *xprt, int len), + + TP_ARGS(xprt, len), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(int, len) + ), + + TP_fast_assign( + __entry->xprt = xprt; + __entry->len = len; + ), + + TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, + (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len, + show_svc_xprt_flags(__entry->xprt->xpt_flags)) +); +#endif /* _TRACE_SUNRPC_H */ + +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/swiotlb.h b/kernel/include/trace/events/swiotlb.h new file mode 100644 index 000000000..7ea4c5e7c --- /dev/null +++ b/kernel/include/trace/events/swiotlb.h @@ -0,0 +1,46 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM swiotlb + +#if !defined(_TRACE_SWIOTLB_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SWIOTLB_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(swiotlb_bounced, + + TP_PROTO(struct device *dev, + dma_addr_t dev_addr, + size_t size, + int swiotlb_force), + + TP_ARGS(dev, dev_addr, size, swiotlb_force), + + TP_STRUCT__entry( + __string( dev_name, dev_name(dev) ) + __field( u64, dma_mask ) + __field( dma_addr_t, dev_addr ) + __field( size_t, size ) + __field( int, swiotlb_force ) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(dev)); + __entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0); + __entry->dev_addr = dev_addr; + __entry->size = size; + __entry->swiotlb_force = swiotlb_force; + ), + + TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx " + "size=%zu %s", + __get_str(dev_name), + __entry->dma_mask, + (unsigned long long)__entry->dev_addr, + __entry->size, + __entry->swiotlb_force ? 
"swiotlb_force" : "" ) +); + +#endif /* _TRACE_SWIOTLB_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/syscalls.h b/kernel/include/trace/events/syscalls.h new file mode 100644 index 000000000..14e49c798 --- /dev/null +++ b/kernel/include/trace/events/syscalls.h @@ -0,0 +1,72 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM raw_syscalls +#define TRACE_INCLUDE_FILE syscalls + +#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENTS_SYSCALLS_H + +#include <linux/tracepoint.h> + +#include <asm/ptrace.h> +#include <asm/syscall.h> + + +#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS + +TRACE_EVENT_FN(sys_enter, + + TP_PROTO(struct pt_regs *regs, long id), + + TP_ARGS(regs, id), + + TP_STRUCT__entry( + __field( long, id ) + __array( unsigned long, args, 6 ) + ), + + TP_fast_assign( + __entry->id = id; + syscall_get_arguments(current, regs, 0, 6, __entry->args); + ), + + TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", + __entry->id, + __entry->args[0], __entry->args[1], __entry->args[2], + __entry->args[3], __entry->args[4], __entry->args[5]), + + syscall_regfunc, syscall_unregfunc +); + +TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY) + +TRACE_EVENT_FN(sys_exit, + + TP_PROTO(struct pt_regs *regs, long ret), + + TP_ARGS(regs, ret), + + TP_STRUCT__entry( + __field( long, id ) + __field( long, ret ) + ), + + TP_fast_assign( + __entry->id = syscall_get_nr(current, regs); + __entry->ret = ret; + ), + + TP_printk("NR %ld = %ld", + __entry->id, __entry->ret), + + syscall_regfunc, syscall_unregfunc +); + +TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY) + +#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ + +#endif /* _TRACE_EVENTS_SYSCALLS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> + diff --git a/kernel/include/trace/events/target.h b/kernel/include/trace/events/target.h new file mode 100644 index 000000000..04c3c6efd --- /dev/null +++ b/kernel/include/trace/events/target.h @@ -0,0 +1,214 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM target + +#if !defined(_TRACE_TARGET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TARGET_H + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> + +/* cribbed verbatim from <trace/event/scsi.h> */ +#define scsi_opcode_name(opcode) { opcode, #opcode } +#define show_opcode_name(val) \ + __print_symbolic(val, \ + scsi_opcode_name(TEST_UNIT_READY), \ + scsi_opcode_name(REZERO_UNIT), \ + scsi_opcode_name(REQUEST_SENSE), \ + scsi_opcode_name(FORMAT_UNIT), \ + scsi_opcode_name(READ_BLOCK_LIMITS), \ + scsi_opcode_name(REASSIGN_BLOCKS), \ + scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \ + scsi_opcode_name(READ_6), \ + scsi_opcode_name(WRITE_6), \ + scsi_opcode_name(SEEK_6), \ + scsi_opcode_name(READ_REVERSE), \ + scsi_opcode_name(WRITE_FILEMARKS), \ + scsi_opcode_name(SPACE), \ + scsi_opcode_name(INQUIRY), \ + scsi_opcode_name(RECOVER_BUFFERED_DATA), \ + scsi_opcode_name(MODE_SELECT), \ + scsi_opcode_name(RESERVE), \ + scsi_opcode_name(RELEASE), \ + scsi_opcode_name(COPY), \ + scsi_opcode_name(ERASE), \ + scsi_opcode_name(MODE_SENSE), \ + scsi_opcode_name(START_STOP), \ + scsi_opcode_name(RECEIVE_DIAGNOSTIC), \ + scsi_opcode_name(SEND_DIAGNOSTIC), \ + scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \ + scsi_opcode_name(SET_WINDOW), \ + scsi_opcode_name(READ_CAPACITY), \ + scsi_opcode_name(READ_10), \ + scsi_opcode_name(WRITE_10), \ 
+ scsi_opcode_name(SEEK_10), \ + scsi_opcode_name(POSITION_TO_ELEMENT), \ + scsi_opcode_name(WRITE_VERIFY), \ + scsi_opcode_name(VERIFY), \ + scsi_opcode_name(SEARCH_HIGH), \ + scsi_opcode_name(SEARCH_EQUAL), \ + scsi_opcode_name(SEARCH_LOW), \ + scsi_opcode_name(SET_LIMITS), \ + scsi_opcode_name(PRE_FETCH), \ + scsi_opcode_name(READ_POSITION), \ + scsi_opcode_name(SYNCHRONIZE_CACHE), \ + scsi_opcode_name(LOCK_UNLOCK_CACHE), \ + scsi_opcode_name(READ_DEFECT_DATA), \ + scsi_opcode_name(MEDIUM_SCAN), \ + scsi_opcode_name(COMPARE), \ + scsi_opcode_name(COPY_VERIFY), \ + scsi_opcode_name(WRITE_BUFFER), \ + scsi_opcode_name(READ_BUFFER), \ + scsi_opcode_name(UPDATE_BLOCK), \ + scsi_opcode_name(READ_LONG), \ + scsi_opcode_name(WRITE_LONG), \ + scsi_opcode_name(CHANGE_DEFINITION), \ + scsi_opcode_name(WRITE_SAME), \ + scsi_opcode_name(UNMAP), \ + scsi_opcode_name(READ_TOC), \ + scsi_opcode_name(LOG_SELECT), \ + scsi_opcode_name(LOG_SENSE), \ + scsi_opcode_name(XDWRITEREAD_10), \ + scsi_opcode_name(MODE_SELECT_10), \ + scsi_opcode_name(RESERVE_10), \ + scsi_opcode_name(RELEASE_10), \ + scsi_opcode_name(MODE_SENSE_10), \ + scsi_opcode_name(PERSISTENT_RESERVE_IN), \ + scsi_opcode_name(PERSISTENT_RESERVE_OUT), \ + scsi_opcode_name(VARIABLE_LENGTH_CMD), \ + scsi_opcode_name(REPORT_LUNS), \ + scsi_opcode_name(MAINTENANCE_IN), \ + scsi_opcode_name(MAINTENANCE_OUT), \ + scsi_opcode_name(MOVE_MEDIUM), \ + scsi_opcode_name(EXCHANGE_MEDIUM), \ + scsi_opcode_name(READ_12), \ + scsi_opcode_name(WRITE_12), \ + scsi_opcode_name(WRITE_VERIFY_12), \ + scsi_opcode_name(SEARCH_HIGH_12), \ + scsi_opcode_name(SEARCH_EQUAL_12), \ + scsi_opcode_name(SEARCH_LOW_12), \ + scsi_opcode_name(READ_ELEMENT_STATUS), \ + scsi_opcode_name(SEND_VOLUME_TAG), \ + scsi_opcode_name(WRITE_LONG_2), \ + scsi_opcode_name(READ_16), \ + scsi_opcode_name(WRITE_16), \ + scsi_opcode_name(VERIFY_16), \ + scsi_opcode_name(WRITE_SAME_16), \ + scsi_opcode_name(SERVICE_ACTION_IN_16), \ + scsi_opcode_name(SAI_READ_CAPACITY_16), \ + scsi_opcode_name(SAI_GET_LBA_STATUS), \ + scsi_opcode_name(MI_REPORT_TARGET_PGS), \ + scsi_opcode_name(MO_SET_TARGET_PGS), \ + scsi_opcode_name(READ_32), \ + scsi_opcode_name(WRITE_32), \ + scsi_opcode_name(WRITE_SAME_32), \ + scsi_opcode_name(ATA_16), \ + scsi_opcode_name(ATA_12)) + +#define show_task_attribute_name(val) \ + __print_symbolic(val, \ + { TCM_SIMPLE_TAG, "SIMPLE" }, \ + { TCM_HEAD_TAG, "HEAD" }, \ + { TCM_ORDERED_TAG, "ORDERED" }, \ + { TCM_ACA_TAG, "ACA" } ) + +#define show_scsi_status_name(val) \ + __print_symbolic(val, \ + { SAM_STAT_GOOD, "GOOD" }, \ + { SAM_STAT_CHECK_CONDITION, "CHECK CONDITION" }, \ + { SAM_STAT_CONDITION_MET, "CONDITION MET" }, \ + { SAM_STAT_BUSY, "BUSY" }, \ + { SAM_STAT_INTERMEDIATE, "INTERMEDIATE" }, \ + { SAM_STAT_INTERMEDIATE_CONDITION_MET, "INTERMEDIATE CONDITION MET" }, \ + { SAM_STAT_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, \ + { SAM_STAT_COMMAND_TERMINATED, "COMMAND TERMINATED" }, \ + { SAM_STAT_TASK_SET_FULL, "TASK SET FULL" }, \ + { SAM_STAT_ACA_ACTIVE, "ACA ACTIVE" }, \ + { SAM_STAT_TASK_ABORTED, "TASK ABORTED" } ) + +TRACE_EVENT(target_sequencer_start, + + TP_PROTO(struct se_cmd *cmd), + + TP_ARGS(cmd), + + TP_STRUCT__entry( + __field( unsigned int, unpacked_lun ) + __field( unsigned int, opcode ) + __field( unsigned int, data_length ) + __field( unsigned int, task_attribute ) + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) + __string( initiator, cmd->se_sess->se_node_acl->initiatorname ) + ), + + TP_fast_assign( + __entry->unpacked_lun = cmd->orig_fe_lun; 
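+		/* t_task_cdb[0] is the SCSI opcode byte; TP_printk decodes it via show_opcode_name() */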
+ __entry->opcode = cmd->t_task_cdb[0]; + __entry->data_length = cmd->data_length; + __entry->task_attribute = cmd->sam_task_attr; + memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); + __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); + ), + + TP_printk("%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)", + __get_str(initiator), __entry->unpacked_lun, + show_opcode_name(__entry->opcode), + __entry->data_length, __print_hex(__entry->cdb, 16), + show_task_attribute_name(__entry->task_attribute), + scsi_command_size(__entry->cdb) <= 16 ? + __entry->cdb[scsi_command_size(__entry->cdb) - 1] : + __entry->cdb[1] + ) +); + +TRACE_EVENT(target_cmd_complete, + + TP_PROTO(struct se_cmd *cmd), + + TP_ARGS(cmd), + + TP_STRUCT__entry( + __field( unsigned int, unpacked_lun ) + __field( unsigned int, opcode ) + __field( unsigned int, data_length ) + __field( unsigned int, task_attribute ) + __field( unsigned char, scsi_status ) + __field( unsigned char, sense_length ) + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) + __array( unsigned char, sense_data, 18 ) + __string(initiator, cmd->se_sess->se_node_acl->initiatorname) + ), + + TP_fast_assign( + __entry->unpacked_lun = cmd->orig_fe_lun; + __entry->opcode = cmd->t_task_cdb[0]; + __entry->data_length = cmd->data_length; + __entry->task_attribute = cmd->sam_task_attr; + __entry->scsi_status = cmd->scsi_status; + __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ? + min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0; + memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); + memcpy(__entry->sense_data, cmd->sense_buffer, __entry->sense_length); + __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); + ), + + TP_printk("%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)", + __get_str(initiator), __entry->unpacked_lun, + show_scsi_status_name(__entry->scsi_status), + __entry->sense_length, __entry->sense_length ? " / " : "", + __print_hex(__entry->sense_data, __entry->sense_length), + show_opcode_name(__entry->opcode), + __entry->data_length, __print_hex(__entry->cdb, 16), + show_task_attribute_name(__entry->task_attribute), + scsi_command_size(__entry->cdb) <= 16 ? 
+		__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
+		__entry->cdb[1]
+	)
+);
+
+#endif /* _TRACE_TARGET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/task.h b/kernel/include/trace/events/task.h
new file mode 100644
index 000000000..dee3bb1d5
--- /dev/null
+++ b/kernel/include/trace/events/task.h
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM task
+
+#if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TASK_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(task_newtask,
+
+	TP_PROTO(struct task_struct *task, unsigned long clone_flags),
+
+	TP_ARGS(task, clone_flags),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid)
+		__array( char, comm, TASK_COMM_LEN)
+		__field( unsigned long, clone_flags)
+		__field( short, oom_score_adj)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task->pid;
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->clone_flags = clone_flags;
+		__entry->oom_score_adj = task->signal->oom_score_adj;
+	),
+
+	TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%hd",
+		__entry->pid, __entry->comm,
+		__entry->clone_flags, __entry->oom_score_adj)
+);
+
+TRACE_EVENT(task_rename,
+
+	TP_PROTO(struct task_struct *task, const char *comm),
+
+	TP_ARGS(task, comm),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid)
+		__array( char, oldcomm, TASK_COMM_LEN)
+		__array( char, newcomm, TASK_COMM_LEN)
+		__field( short, oom_score_adj)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task->pid;
+		memcpy(__entry->oldcomm, task->comm, TASK_COMM_LEN);
+		memcpy(__entry->newcomm, comm, TASK_COMM_LEN);
+		__entry->oom_score_adj = task->signal->oom_score_adj;
+	),
+
+	TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd",
+		__entry->pid, __entry->oldcomm,
+		__entry->newcomm, __entry->oom_score_adj)
+);
+
+#endif /* _TRACE_TASK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/thermal.h b/kernel/include/trace/events/thermal.h
new file mode 100644
index 000000000..0f4f95d63
--- /dev/null
+++ b/kernel/include/trace/events/thermal.h
@@ -0,0 +1,83 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal
+
+#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_H
+
+#include <linux/thermal.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_temperature,
+
+	TP_PROTO(struct thermal_zone_device *tz),
+
+	TP_ARGS(tz),
+
+	TP_STRUCT__entry(
+		__string(thermal_zone, tz->type)
+		__field(int, id)
+		__field(int, temp_prev)
+		__field(int, temp)
+	),
+
+	TP_fast_assign(
+		__assign_str(thermal_zone, tz->type);
+		__entry->id = tz->id;
+		__entry->temp_prev = tz->last_temperature;
+		__entry->temp = tz->temperature;
+	),
+
+	TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
+		__get_str(thermal_zone), __entry->id, __entry->temp_prev,
+		__entry->temp)
+);
+
+TRACE_EVENT(cdev_update,
+
+	TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
+
+	TP_ARGS(cdev, target),
+
+	TP_STRUCT__entry(
+		__string(type, cdev->type)
+		__field(unsigned long, target)
+	),
+
+	TP_fast_assign(
+		__assign_str(type, cdev->type);
+		__entry->target = target;
+	),
+
+	TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
+);
+
+TRACE_EVENT(thermal_zone_trip,
+
+	TP_PROTO(struct thermal_zone_device *tz, int trip,
+		enum thermal_trip_type trip_type),
+
+	TP_ARGS(tz, trip, trip_type),
+
+	TP_STRUCT__entry(
+		__string(thermal_zone, tz->type)
+		__field(int, id)
+		__field(int, trip)
+		__field(enum thermal_trip_type,
trip_type) + ), + + TP_fast_assign( + __assign_str(thermal_zone, tz->type); + __entry->id = tz->id; + __entry->trip = trip; + __entry->trip_type = trip_type; + ), + + TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d", + __get_str(thermal_zone), __entry->id, __entry->trip, + __entry->trip_type) +); + +#endif /* _TRACE_THERMAL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/thp.h b/kernel/include/trace/events/thp.h new file mode 100644 index 000000000..b59b065e9 --- /dev/null +++ b/kernel/include/trace/events/thp.h @@ -0,0 +1,88 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM thp + +#if !defined(_TRACE_THP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_THP_H + +#include <linux/types.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(hugepage_invalidate, + + TP_PROTO(unsigned long addr, unsigned long pte), + TP_ARGS(addr, pte), + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, pte) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pte = pte; + ), + + TP_printk("hugepage invalidate at addr 0x%lx and pte = 0x%lx", + __entry->addr, __entry->pte) +); + +TRACE_EVENT(hugepage_set_pmd, + + TP_PROTO(unsigned long addr, unsigned long pmd), + TP_ARGS(addr, pmd), + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, pmd) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pmd = pmd; + ), + + TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd) +); + + +TRACE_EVENT(hugepage_update, + + TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set), + TP_ARGS(addr, pte, clr, set), + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, pte) + __field(unsigned long, clr) + __field(unsigned long, set) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pte = pte; + __entry->clr = clr; + __entry->set = set; + + ), + + TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set) +); +TRACE_EVENT(hugepage_splitting, + + TP_PROTO(unsigned long addr, unsigned long pte), + TP_ARGS(addr, pte), + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, pte) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pte = pte; + ), + + TP_printk("hugepage splitting at addr 0x%lx and pte = 0x%lx", + __entry->addr, __entry->pte) +); + +#endif /* _TRACE_THP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/timer.h b/kernel/include/trace/events/timer.h new file mode 100644 index 000000000..68c2c2000 --- /dev/null +++ b/kernel/include/trace/events/timer.h @@ -0,0 +1,350 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM timer + +#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TIMER_H + +#include <linux/tracepoint.h> +#include <linux/hrtimer.h> +#include <linux/timer.h> + +DECLARE_EVENT_CLASS(timer_class, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer=%p", __entry->timer) +); + +/** + * timer_init - called when the timer is initialized + * @timer: pointer to struct timer_list + */ +DEFINE_EVENT(timer_class, timer_init, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer) +); + +/** + * timer_start - called when the timer is started + * @timer: pointer to struct 
timer_list
+ * @expires:	the timer's expiry time
+ */
+TRACE_EVENT(timer_start,
+
+	TP_PROTO(struct timer_list *timer, unsigned long expires),
+
+	TP_ARGS(timer, expires),
+
+	TP_STRUCT__entry(
+		__field( void *, timer )
+		__field( void *, function )
+		__field( unsigned long, expires )
+		__field( unsigned long, now )
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+		__entry->function = timer->function;
+		__entry->expires = expires;
+		__entry->now = jiffies;
+	),
+
+	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+		__entry->timer, __entry->function, __entry->expires,
+		(long)__entry->expires - __entry->now)
+);
+
+/**
+ * timer_expire_entry - called immediately before the timer callback
+ * @timer:	pointer to struct timer_list
+ *
+ * Allows one to determine the timer latency.
+ */
+TRACE_EVENT(timer_expire_entry,
+
+	TP_PROTO(struct timer_list *timer),
+
+	TP_ARGS(timer),
+
+	TP_STRUCT__entry(
+		__field( void *, timer )
+		__field( unsigned long, now )
+		__field( void *, function)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+		__entry->now = jiffies;
+		__entry->function = timer->function;
+	),
+
+	TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function, __entry->now)
+);
+
+/**
+ * timer_expire_exit - called immediately after the timer callback returns
+ * @timer:	pointer to struct timer_list
+ *
+ * When used in combination with the timer_expire_entry tracepoint, we can
+ * determine the runtime of the timer callback function.
+ *
+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
+ * be invalid. We solely track the pointer.
+ */
+DEFINE_EVENT(timer_class, timer_expire_exit,
+
+	TP_PROTO(struct timer_list *timer),
+
+	TP_ARGS(timer)
+);
+
+/**
+ * timer_cancel - called when the timer is canceled
+ * @timer:	pointer to struct timer_list
+ */
+DEFINE_EVENT(timer_class, timer_cancel,
+
+	TP_PROTO(struct timer_list *timer),
+
+	TP_ARGS(timer)
+);
+
+/**
+ * hrtimer_init - called when the hrtimer is initialized
+ * @hrtimer:	pointer to struct hrtimer
+ * @clockid:	the hrtimer's clock
+ * @mode:	the hrtimer's mode
+ */
+TRACE_EVENT(hrtimer_init,
+
+	TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
+		 enum hrtimer_mode mode),
+
+	TP_ARGS(hrtimer, clockid, mode),
+
+	TP_STRUCT__entry(
+		__field( void *, hrtimer )
+		__field( clockid_t, clockid )
+		__field( enum hrtimer_mode, mode )
+	),
+
+	TP_fast_assign(
+		__entry->hrtimer = hrtimer;
+		__entry->clockid = clockid;
+		__entry->mode = mode;
+	),
+
+	TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
+		  __entry->clockid == CLOCK_REALTIME ?
+			"CLOCK_REALTIME" : "CLOCK_MONOTONIC",
+		  __entry->mode == HRTIMER_MODE_ABS ?
+ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") +); + +/** + * hrtimer_start - called when the hrtimer is started + * @hrtimer: pointer to struct hrtimer + */ +TRACE_EVENT(hrtimer_start, + + TP_PROTO(struct hrtimer *hrtimer), + + TP_ARGS(hrtimer), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + __field( void *, function ) + __field( s64, expires ) + __field( s64, softexpires ) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + __entry->function = hrtimer->function; + __entry->expires = hrtimer_get_expires(hrtimer).tv64; + __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; + ), + + TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", + __entry->hrtimer, __entry->function, + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->expires }), + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->softexpires })) +); + +/** + * hrtimer_expire_entry - called immediately before the hrtimer callback + * @hrtimer: pointer to struct hrtimer + * @now: pointer to variable which contains current time of the + * timers base. + * + * Allows to determine the timer latency. + */ +TRACE_EVENT(hrtimer_expire_entry, + + TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), + + TP_ARGS(hrtimer, now), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + __field( s64, now ) + __field( void *, function) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + __entry->now = now->tv64; + __entry->function = hrtimer->function; + ), + + TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, + (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) + ); + +DECLARE_EVENT_CLASS(hrtimer_class, + + TP_PROTO(struct hrtimer *hrtimer), + + TP_ARGS(hrtimer), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + ), + + TP_printk("hrtimer=%p", __entry->hrtimer) +); + +/** + * hrtimer_expire_exit - called immediately after the hrtimer callback returns + * @hrtimer: pointer to struct hrtimer + * + * When used in combination with the hrtimer_expire_entry tracepoint we can + * determine the runtime of the callback function. 
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
+
+	TP_PROTO(struct hrtimer *hrtimer),
+
+	TP_ARGS(hrtimer)
+);
+
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @hrtimer:	pointer to struct hrtimer
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
+
+	TP_PROTO(struct hrtimer *hrtimer),
+
+	TP_ARGS(hrtimer)
+);
+
+/**
+ * itimer_state - called when itimer is started or canceled
+ * @which:	name of the interval timer
+ * @value:	the itimer's value, the itimer is canceled if value->it_value is
+ *		zero, otherwise it is started
+ * @expires:	the itimer's expiry time
+ */
+TRACE_EVENT(itimer_state,
+
+	TP_PROTO(int which, const struct itimerval *const value,
+		 cputime_t expires),
+
+	TP_ARGS(which, value, expires),
+
+	TP_STRUCT__entry(
+		__field( int, which )
+		__field( cputime_t, expires )
+		__field( long, value_sec )
+		__field( long, value_usec )
+		__field( long, interval_sec )
+		__field( long, interval_usec )
+	),
+
+	TP_fast_assign(
+		__entry->which = which;
+		__entry->expires = expires;
+		__entry->value_sec = value->it_value.tv_sec;
+		__entry->value_usec = value->it_value.tv_usec;
+		__entry->interval_sec = value->it_interval.tv_sec;
+		__entry->interval_usec = value->it_interval.tv_usec;
+	),
+
+	TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
+		  __entry->which, (unsigned long long)__entry->expires,
+		  __entry->value_sec, __entry->value_usec,
+		  __entry->interval_sec, __entry->interval_usec)
+);
+
+/**
+ * itimer_expire - called when itimer expires
+ * @which:	type of the interval timer
+ * @pid:	pid of the process which owns the timer
+ * @now:	current time, used to calculate the latency of the itimer
+ */
+TRACE_EVENT(itimer_expire,
+
+	TP_PROTO(int which, struct pid *pid, cputime_t now),
+
+	TP_ARGS(which, pid, now),
+
+	TP_STRUCT__entry(
+		__field( int, which )
+		__field( pid_t, pid )
+		__field( cputime_t, now )
+	),
+
+	TP_fast_assign(
+		__entry->which = which;
+		__entry->now = now;
+		__entry->pid = pid_nr(pid);
+	),
+
+	TP_printk("which=%d pid=%d now=%llu", __entry->which,
+		  (int) __entry->pid, (unsigned long long)__entry->now)
+);
+
+#ifdef CONFIG_NO_HZ_COMMON
+TRACE_EVENT(tick_stop,
+
+	TP_PROTO(int success, char *error_msg),
+
+	TP_ARGS(success, error_msg),
+
+	TP_STRUCT__entry(
+		__field( int, success )
+		__string( msg, error_msg )
+	),
+
+	TP_fast_assign(
+		__entry->success = success;
+		__assign_str(msg, error_msg);
+	),
+
+	TP_printk("success=%s msg=%s", __entry->success ? "yes" : "no", __get_str(msg))
+);
+#endif
+
+#endif /* _TRACE_TIMER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/tlb.h b/kernel/include/trace/events/tlb.h
new file mode 100644
index 000000000..4250f364a
--- /dev/null
+++ b/kernel/include/trace/events/tlb.h
@@ -0,0 +1,62 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tlb
+
+#if !defined(_TRACE_TLB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TLB_H
+
+#include <linux/mm_types.h>
+#include <linux/tracepoint.h>
+
+#define TLB_FLUSH_REASON	\
+	EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" )	\
+	EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" )		\
+	EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" )		\
+	EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" )
+
+/*
+ * First define the enums in TLB_FLUSH_REASON to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */ +#undef EM +#undef EMe +#define EM(a,b) TRACE_DEFINE_ENUM(a); +#define EMe(a,b) TRACE_DEFINE_ENUM(a); + +TLB_FLUSH_REASON + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a,b) { a, b }, +#define EMe(a,b) { a, b } + +TRACE_EVENT_CONDITION(tlb_flush, + + TP_PROTO(int reason, unsigned long pages), + TP_ARGS(reason, pages), + + TP_CONDITION(cpu_online(smp_processor_id())), + + TP_STRUCT__entry( + __field( int, reason) + __field(unsigned long, pages) + ), + + TP_fast_assign( + __entry->reason = reason; + __entry->pages = pages; + ), + + TP_printk("pages:%ld reason:%s (%d)", + __entry->pages, + __print_symbolic(__entry->reason, TLB_FLUSH_REASON), + __entry->reason) +); + +#endif /* _TRACE_TLB_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/udp.h b/kernel/include/trace/events/udp.h new file mode 100644 index 000000000..a664bb940 --- /dev/null +++ b/kernel/include/trace/events/udp.h @@ -0,0 +1,32 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM udp + +#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_UDP_H + +#include <linux/udp.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(udp_fail_queue_rcv_skb, + + TP_PROTO(int rc, struct sock *sk), + + TP_ARGS(rc, sk), + + TP_STRUCT__entry( + __field(int, rc) + __field(__u16, lport) + ), + + TP_fast_assign( + __entry->rc = rc; + __entry->lport = inet_sk(sk)->inet_num; + ), + + TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport) +); + +#endif /* _TRACE_UDP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/v4l2.h b/kernel/include/trace/events/v4l2.h new file mode 100644 index 000000000..20112170f --- /dev/null +++ b/kernel/include/trace/events/v4l2.h @@ -0,0 +1,183 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM v4l2 + +#if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_V4L2_H + +#include <linux/tracepoint.h> + +/* Enums require being exported to userspace, for user tool parsing */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +#define show_type(type) \ + __print_symbolic(type, SHOW_TYPE) + +#define SHOW_TYPE \ + EM( V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" ) \ + EM( V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" ) \ + EM( V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" ) \ + EM( V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" ) \ + EM( V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" ) \ + EM( V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" ) \ + EM( V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" ) \ + EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" ) \ + EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \ + EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \ + EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \ + EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" ) + +SHOW_TYPE + +#define show_field(field) \ + __print_symbolic(field, SHOW_FIELD) + +#define SHOW_FIELD \ + EM( V4L2_FIELD_ANY, "ANY" ) \ + EM( V4L2_FIELD_NONE, "NONE" ) \ + EM( V4L2_FIELD_TOP, "TOP" ) \ + EM( V4L2_FIELD_BOTTOM, "BOTTOM" ) \ + EM( V4L2_FIELD_INTERLACED, "INTERLACED" ) \ + EM( V4L2_FIELD_SEQ_TB, "SEQ_TB" ) \ + EM( V4L2_FIELD_SEQ_BT, "SEQ_BT" ) \ + EM( V4L2_FIELD_ALTERNATE, "ALTERNATE" ) \ + EM( V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" ) \ + EMe( V4L2_FIELD_INTERLACED_BT, 
"INTERLACED_BT" ) + +SHOW_FIELD + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +/* V4L2_TC_TYPE_* are macros, not defines, they do not need processing */ + +#define show_timecode_type(type) \ + __print_symbolic(type, \ + { V4L2_TC_TYPE_24FPS, "24FPS" }, \ + { V4L2_TC_TYPE_25FPS, "25FPS" }, \ + { V4L2_TC_TYPE_30FPS, "30FPS" }, \ + { V4L2_TC_TYPE_50FPS, "50FPS" }, \ + { V4L2_TC_TYPE_60FPS, "60FPS" }) + +#define show_flags(flags) \ + __print_flags(flags, "|", \ + { V4L2_BUF_FLAG_MAPPED, "MAPPED" }, \ + { V4L2_BUF_FLAG_QUEUED, "QUEUED" }, \ + { V4L2_BUF_FLAG_DONE, "DONE" }, \ + { V4L2_BUF_FLAG_KEYFRAME, "KEYFRAME" }, \ + { V4L2_BUF_FLAG_PFRAME, "PFRAME" }, \ + { V4L2_BUF_FLAG_BFRAME, "BFRAME" }, \ + { V4L2_BUF_FLAG_ERROR, "ERROR" }, \ + { V4L2_BUF_FLAG_TIMECODE, "TIMECODE" }, \ + { V4L2_BUF_FLAG_PREPARED, "PREPARED" }, \ + { V4L2_BUF_FLAG_NO_CACHE_INVALIDATE, "NO_CACHE_INVALIDATE" }, \ + { V4L2_BUF_FLAG_NO_CACHE_CLEAN, "NO_CACHE_CLEAN" }, \ + { V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \ + { V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \ + { V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \ + { V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" }) + +#define show_timecode_flags(flags) \ + __print_flags(flags, "|", \ + { V4L2_TC_FLAG_DROPFRAME, "DROPFRAME" }, \ + { V4L2_TC_FLAG_COLORFRAME, "COLORFRAME" }, \ + { V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \ + { V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" }) + +#define V4L2_TRACE_EVENT(event_name) \ + TRACE_EVENT(event_name, \ + TP_PROTO(int minor, struct v4l2_buffer *buf), \ + \ + TP_ARGS(minor, buf), \ + \ + TP_STRUCT__entry( \ + __field(int, minor) \ + __field(u32, index) \ + __field(u32, type) \ + __field(u32, bytesused) \ + __field(u32, flags) \ + __field(u32, field) \ + __field(s64, timestamp) \ + __field(u32, timecode_type) \ + __field(u32, timecode_flags) \ + __field(u8, timecode_frames) \ + __field(u8, timecode_seconds) \ + __field(u8, timecode_minutes) \ + __field(u8, timecode_hours) \ + __field(u8, timecode_userbits0) \ + __field(u8, timecode_userbits1) \ + __field(u8, timecode_userbits2) \ + __field(u8, timecode_userbits3) \ + __field(u32, sequence) \ + ), \ + \ + TP_fast_assign( \ + __entry->minor = minor; \ + __entry->index = buf->index; \ + __entry->type = buf->type; \ + __entry->bytesused = buf->bytesused; \ + __entry->flags = buf->flags; \ + __entry->field = buf->field; \ + __entry->timestamp = \ + timeval_to_ns(&buf->timestamp); \ + __entry->timecode_type = buf->timecode.type; \ + __entry->timecode_flags = buf->timecode.flags; \ + __entry->timecode_frames = \ + buf->timecode.frames; \ + __entry->timecode_seconds = \ + buf->timecode.seconds; \ + __entry->timecode_minutes = \ + buf->timecode.minutes; \ + __entry->timecode_hours = buf->timecode.hours; \ + __entry->timecode_userbits0 = \ + buf->timecode.userbits[0]; \ + __entry->timecode_userbits1 = \ + buf->timecode.userbits[1]; \ + __entry->timecode_userbits2 = \ + buf->timecode.userbits[2]; \ + __entry->timecode_userbits3 = \ + buf->timecode.userbits[3]; \ + __entry->sequence = buf->sequence; \ + ), \ + \ + TP_printk("minor = %d, index = %u, type = %s, " \ + "bytesused = %u, flags = %s, " \ + "field = %s, timestamp = %llu, timecode = { " \ + "type = %s, flags = %s, frames = %u, " \ + "seconds = %u, minutes = %u, hours = %u, " \ + "userbits = { %u %u %u %u } }, " \ + "sequence = %u", 
__entry->minor, \ + __entry->index, show_type(__entry->type), \ + __entry->bytesused, \ + show_flags(__entry->flags), \ + show_field(__entry->field), \ + __entry->timestamp, \ + show_timecode_type(__entry->timecode_type), \ + show_timecode_flags(__entry->timecode_flags), \ + __entry->timecode_frames, \ + __entry->timecode_seconds, \ + __entry->timecode_minutes, \ + __entry->timecode_hours, \ + __entry->timecode_userbits0, \ + __entry->timecode_userbits1, \ + __entry->timecode_userbits2, \ + __entry->timecode_userbits3, \ + __entry->sequence \ + ) \ + ) + +V4L2_TRACE_EVENT(v4l2_dqbuf); +V4L2_TRACE_EVENT(v4l2_qbuf); + +#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/vmscan.h b/kernel/include/trace/events/vmscan.h new file mode 100644 index 000000000..f66476b96 --- /dev/null +++ b/kernel/include/trace/events/vmscan.h @@ -0,0 +1,390 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vmscan + +#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_VMSCAN_H + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <linux/mm.h> +#include <linux/memcontrol.h> +#include <trace/events/gfpflags.h> + +#define RECLAIM_WB_ANON 0x0001u +#define RECLAIM_WB_FILE 0x0002u +#define RECLAIM_WB_MIXED 0x0010u +#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */ +#define RECLAIM_WB_ASYNC 0x0008u + +#define show_reclaim_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \ + {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \ + {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \ + {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \ + {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \ + ) : "RECLAIM_WB_NONE" + +#define trace_reclaim_flags(page) ( \ + (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ + (RECLAIM_WB_ASYNC) \ + ) + +#define trace_shrink_flags(file) \ + ( \ + (file ? 
RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ + (RECLAIM_WB_ASYNC) \ + ) + +TRACE_EVENT(mm_vmscan_kswapd_sleep, + + TP_PROTO(int nid), + + TP_ARGS(nid), + + TP_STRUCT__entry( + __field( int, nid ) + ), + + TP_fast_assign( + __entry->nid = nid; + ), + + TP_printk("nid=%d", __entry->nid) +); + +TRACE_EVENT(mm_vmscan_kswapd_wake, + + TP_PROTO(int nid, int order), + + TP_ARGS(nid, order), + + TP_STRUCT__entry( + __field( int, nid ) + __field( int, order ) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->order = order; + ), + + TP_printk("nid=%d order=%d", __entry->nid, __entry->order) +); + +TRACE_EVENT(mm_vmscan_wakeup_kswapd, + + TP_PROTO(int nid, int zid, int order), + + TP_ARGS(nid, zid, order), + + TP_STRUCT__entry( + __field( int, nid ) + __field( int, zid ) + __field( int, order ) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->zid = zid; + __entry->order = order; + ), + + TP_printk("nid=%d zid=%d order=%d", + __entry->nid, + __entry->zid, + __entry->order) +); + +DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags), + + TP_STRUCT__entry( + __field( int, order ) + __field( int, may_writepage ) + __field( gfp_t, gfp_flags ) + ), + + TP_fast_assign( + __entry->order = order; + __entry->may_writepage = may_writepage; + __entry->gfp_flags = gfp_flags; + ), + + TP_printk("order=%d may_writepage=%d gfp_flags=%s", + __entry->order, + __entry->may_writepage, + show_gfp_flags(__entry->gfp_flags)) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags) +); + +DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed), + + TP_STRUCT__entry( + __field( unsigned long, nr_reclaimed ) + ), + + TP_fast_assign( + __entry->nr_reclaimed = nr_reclaimed; + ), + + TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed) +); + +TRACE_EVENT(mm_shrink_slab_start, + TP_PROTO(struct shrinker *shr, struct shrink_control *sc, + long nr_objects_to_shrink, unsigned long pgs_scanned, + unsigned long lru_pgs, unsigned long cache_items, + unsigned long long delta, unsigned long total_scan), + + TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs, + cache_items, delta, total_scan), + + TP_STRUCT__entry( + __field(struct shrinker *, shr) + __field(void *, shrink) + __field(int, nid) + __field(long, nr_objects_to_shrink) + __field(gfp_t, gfp_flags) + __field(unsigned long, pgs_scanned) + __field(unsigned long, lru_pgs) + 
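/* editor's note: cache_items, delta and total_scan snapshot the shrinker scan bookkeeping handed in via TP_ARGS above */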
__field(unsigned long, cache_items) + __field(unsigned long long, delta) + __field(unsigned long, total_scan) + ), + + TP_fast_assign( + __entry->shr = shr; + __entry->shrink = shr->scan_objects; + __entry->nid = sc->nid; + __entry->nr_objects_to_shrink = nr_objects_to_shrink; + __entry->gfp_flags = sc->gfp_mask; + __entry->pgs_scanned = pgs_scanned; + __entry->lru_pgs = lru_pgs; + __entry->cache_items = cache_items; + __entry->delta = delta; + __entry->total_scan = total_scan; + ), + + TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld", + __entry->shrink, + __entry->shr, + __entry->nid, + __entry->nr_objects_to_shrink, + show_gfp_flags(__entry->gfp_flags), + __entry->pgs_scanned, + __entry->lru_pgs, + __entry->cache_items, + __entry->delta, + __entry->total_scan) +); + +TRACE_EVENT(mm_shrink_slab_end, + TP_PROTO(struct shrinker *shr, int nid, int shrinker_retval, + long unused_scan_cnt, long new_scan_cnt, long total_scan), + + TP_ARGS(shr, nid, shrinker_retval, unused_scan_cnt, new_scan_cnt, + total_scan), + + TP_STRUCT__entry( + __field(struct shrinker *, shr) + __field(int, nid) + __field(void *, shrink) + __field(long, unused_scan) + __field(long, new_scan) + __field(int, retval) + __field(long, total_scan) + ), + + TP_fast_assign( + __entry->shr = shr; + __entry->nid = nid; + __entry->shrink = shr->scan_objects; + __entry->unused_scan = unused_scan_cnt; + __entry->new_scan = new_scan_cnt; + __entry->retval = shrinker_retval; + __entry->total_scan = total_scan; + ), + + TP_printk("%pF %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d", + __entry->shrink, + __entry->shr, + __entry->nid, + __entry->unused_scan, + __entry->new_scan, + __entry->total_scan, + __entry->retval) +); + +DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, + + TP_PROTO(int order, + unsigned long nr_requested, + unsigned long nr_scanned, + unsigned long nr_taken, + isolate_mode_t isolate_mode, + int file), + + TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file), + + TP_STRUCT__entry( + __field(int, order) + __field(unsigned long, nr_requested) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_taken) + __field(isolate_mode_t, isolate_mode) + __field(int, file) + ), + + TP_fast_assign( + __entry->order = order; + __entry->nr_requested = nr_requested; + __entry->nr_scanned = nr_scanned; + __entry->nr_taken = nr_taken; + __entry->isolate_mode = isolate_mode; + __entry->file = file; + ), + + TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d", + __entry->isolate_mode, + __entry->order, + __entry->nr_requested, + __entry->nr_scanned, + __entry->nr_taken, + __entry->file) +); + +DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate, + + TP_PROTO(int order, + unsigned long nr_requested, + unsigned long nr_scanned, + unsigned long nr_taken, + isolate_mode_t isolate_mode, + int file), + + TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file) + +); + +DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate, + + TP_PROTO(int order, + unsigned long nr_requested, + unsigned long nr_scanned, + unsigned long nr_taken, + isolate_mode_t isolate_mode, + int file), + + TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file) + +); + +TRACE_EVENT(mm_vmscan_writepage, + + TP_PROTO(struct page *page, + int reclaim_flags), + + TP_ARGS(page, reclaim_flags), + + TP_STRUCT__entry( + 
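/* editor's note: a pfn is recorded instead of the struct page pointer; TP_printk below rebuilds the pointer with pfn_to_page() */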
__field(unsigned long, pfn) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->pfn = page_to_pfn(page); + __entry->reclaim_flags = reclaim_flags; + ), + + TP_printk("page=%p pfn=%lu flags=%s", + pfn_to_page(__entry->pfn), + __entry->pfn, + show_reclaim_flags(__entry->reclaim_flags)) +); + +TRACE_EVENT(mm_vmscan_lru_shrink_inactive, + + TP_PROTO(int nid, int zid, + unsigned long nr_scanned, unsigned long nr_reclaimed, + int priority, int reclaim_flags), + + TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, zid) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_reclaimed) + __field(int, priority) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->zid = zid; + __entry->nr_scanned = nr_scanned; + __entry->nr_reclaimed = nr_reclaimed; + __entry->priority = priority; + __entry->reclaim_flags = reclaim_flags; + ), + + TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s", + __entry->nid, __entry->zid, + __entry->nr_scanned, __entry->nr_reclaimed, + __entry->priority, + show_reclaim_flags(__entry->reclaim_flags)) +); + +#endif /* _TRACE_VMSCAN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/workqueue.h b/kernel/include/trace/events/workqueue.h new file mode 100644 index 000000000..bf0e18ba6 --- /dev/null +++ b/kernel/include/trace/events/workqueue.h @@ -0,0 +1,121 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM workqueue + +#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_WORKQUEUE_H + +#include <linux/tracepoint.h> +#include <linux/workqueue.h> + +DECLARE_EVENT_CLASS(workqueue_work, + + TP_PROTO(struct work_struct *work), + + TP_ARGS(work), + + TP_STRUCT__entry( + __field( void *, work ) + ), + + TP_fast_assign( + __entry->work = work; + ), + + TP_printk("work struct %p", __entry->work) +); + +/** + * workqueue_queue_work - called when a work gets queued + * @req_cpu: the requested cpu + * @pwq: pointer to struct pool_workqueue + * @work: pointer to struct work_struct + * + * This event occurs when a work is queued immediately or once a + * delayed work is actually queued on a workqueue (ie: once the delay + * has been reached). + */ +TRACE_EVENT(workqueue_queue_work, + + TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq, + struct work_struct *work), + + TP_ARGS(req_cpu, pwq, work), + + TP_STRUCT__entry( + __field( void *, work ) + __field( void *, function) + __field( void *, workqueue) + __field( unsigned int, req_cpu ) + __field( unsigned int, cpu ) + ), + + TP_fast_assign( + __entry->work = work; + __entry->function = work->func; + __entry->workqueue = pwq->wq; + __entry->req_cpu = req_cpu; + __entry->cpu = pwq->pool->cpu; + ), + + TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", + __entry->work, __entry->function, __entry->workqueue, + __entry->req_cpu, __entry->cpu) +); + +/** + * workqueue_activate_work - called when a work gets activated + * @work: pointer to struct work_struct + * + * This event occurs when a queued work is put on the active queue, + * which happens immediately after queueing unless @max_active limit + * is reached. 
+ */
+DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work)
+);
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work: pointer to struct work_struct
+ *
+ * Allows tracking of workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+	),
+
+	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+);
+
+/**
+ * workqueue_execute_end - called immediately after the workqueue callback
+ * @work: pointer to struct work_struct
+ *
+ * Allows tracking of workqueue execution.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work)
+);
+
+#endif /*  _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/include/trace/events/writeback.h b/kernel/include/trace/events/writeback.h
new file mode 100644
index 000000000..c178d13d6
--- /dev/null
+++ b/kernel/include/trace/events/writeback.h
@@ -0,0 +1,676 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM writeback
+
+#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WRITEBACK_H
+
+#include <linux/tracepoint.h>
+#include <linux/backing-dev.h>
+#include <linux/writeback.h>
+
+#define show_inode_state(state)					\
+	__print_flags(state, "|",				\
+		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
+		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
+		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
+		{I_NEW,			"I_NEW"},		\
+		{I_WILL_FREE,		"I_WILL_FREE"},		\
+		{I_FREEING,		"I_FREEING"},		\
+		{I_CLEAR,		"I_CLEAR"},		\
+		{I_SYNC,		"I_SYNC"},		\
+		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
+		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
+		{I_REFERENCED,		"I_REFERENCED"}		\
+	)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a,b)		TRACE_DEFINE_ENUM(a);
+#define EMe(a,b)	TRACE_DEFINE_ENUM(a);
+
+#define WB_WORK_REASON							\
+	EM( WB_REASON_BACKGROUND,		"background")		\
+	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
+	EM( WB_REASON_SYNC,			"sync")			\
+	EM( WB_REASON_PERIODIC,			"periodic")		\
+	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
+	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
+	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
+	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")
+
+WB_WORK_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b)		{ a, b },
+#define EMe(a,b)	{ a, b }
+
+struct wb_writeback_work;
+
+TRACE_EVENT(writeback_dirty_page,
+
+	TP_PROTO(struct page *page, struct address_space *mapping),
+
+	TP_ARGS(page, mapping),
+
+	TP_STRUCT__entry (
+		__array(char, name, 32)
+		__field(unsigned long, ino)
+		__field(pgoff_t, index)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name,
+			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
+		__entry->ino = mapping ?
mapping->host->i_ino : 0; + __entry->index = page->index; + ), + + TP_printk("bdi %s: ino=%lu index=%lu", + __entry->name, + __entry->ino, + __entry->index + ) +); + +DECLARE_EVENT_CLASS(writeback_dirty_inode_template, + + TP_PROTO(struct inode *inode, int flags), + + TP_ARGS(inode, flags), + + TP_STRUCT__entry ( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned long, state) + __field(unsigned long, flags) + ), + + TP_fast_assign( + struct backing_dev_info *bdi = inode_to_bdi(inode); + + /* may be called for files on pseudo FSes w/ unregistered bdi */ + strncpy(__entry->name, + bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->flags = flags; + ), + + TP_printk("bdi %s: ino=%lu state=%s flags=%s", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), + show_inode_state(__entry->flags) + ) +); + +DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty, + + TP_PROTO(struct inode *inode, int flags), + + TP_ARGS(inode, flags) +); + +DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, + + TP_PROTO(struct inode *inode, int flags), + + TP_ARGS(inode, flags) +); + +DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, + + TP_PROTO(struct inode *inode, int flags), + + TP_ARGS(inode, flags) +); + +DECLARE_EVENT_CLASS(writeback_write_inode_template, + + TP_PROTO(struct inode *inode, struct writeback_control *wbc), + + TP_ARGS(inode, wbc), + + TP_STRUCT__entry ( + __array(char, name, 32) + __field(unsigned long, ino) + __field(int, sync_mode) + ), + + TP_fast_assign( + strncpy(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); + __entry->ino = inode->i_ino; + __entry->sync_mode = wbc->sync_mode; + ), + + TP_printk("bdi %s: ino=%lu sync_mode=%d", + __entry->name, + __entry->ino, + __entry->sync_mode + ) +); + +DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start, + + TP_PROTO(struct inode *inode, struct writeback_control *wbc), + + TP_ARGS(inode, wbc) +); + +DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode, + + TP_PROTO(struct inode *inode, struct writeback_control *wbc), + + TP_ARGS(inode, wbc) +); + +DECLARE_EVENT_CLASS(writeback_work_class, + TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), + TP_ARGS(bdi, work), + TP_STRUCT__entry( + __array(char, name, 32) + __field(long, nr_pages) + __field(dev_t, sb_dev) + __field(int, sync_mode) + __field(int, for_kupdate) + __field(int, range_cyclic) + __field(int, for_background) + __field(int, reason) + ), + TP_fast_assign( + strncpy(__entry->name, + bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); + __entry->nr_pages = work->nr_pages; + __entry->sb_dev = work->sb ? 
work->sb->s_dev : 0; + __entry->sync_mode = work->sync_mode; + __entry->for_kupdate = work->for_kupdate; + __entry->range_cyclic = work->range_cyclic; + __entry->for_background = work->for_background; + __entry->reason = work->reason; + ), + TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " + "kupdate=%d range_cyclic=%d background=%d reason=%s", + __entry->name, + MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), + __entry->nr_pages, + __entry->sync_mode, + __entry->for_kupdate, + __entry->range_cyclic, + __entry->for_background, + __print_symbolic(__entry->reason, WB_WORK_REASON) + ) +); +#define DEFINE_WRITEBACK_WORK_EVENT(name) \ +DEFINE_EVENT(writeback_work_class, name, \ + TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \ + TP_ARGS(bdi, work)) +DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); +DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); +DEFINE_WRITEBACK_WORK_EVENT(writeback_start); +DEFINE_WRITEBACK_WORK_EVENT(writeback_written); +DEFINE_WRITEBACK_WORK_EVENT(writeback_wait); + +TRACE_EVENT(writeback_pages_written, + TP_PROTO(long pages_written), + TP_ARGS(pages_written), + TP_STRUCT__entry( + __field(long, pages) + ), + TP_fast_assign( + __entry->pages = pages_written; + ), + TP_printk("%ld", __entry->pages) +); + +DECLARE_EVENT_CLASS(writeback_class, + TP_PROTO(struct backing_dev_info *bdi), + TP_ARGS(bdi), + TP_STRUCT__entry( + __array(char, name, 32) + ), + TP_fast_assign( + strncpy(__entry->name, dev_name(bdi->dev), 32); + ), + TP_printk("bdi %s", + __entry->name + ) +); +#define DEFINE_WRITEBACK_EVENT(name) \ +DEFINE_EVENT(writeback_class, name, \ + TP_PROTO(struct backing_dev_info *bdi), \ + TP_ARGS(bdi)) + +DEFINE_WRITEBACK_EVENT(writeback_nowork); +DEFINE_WRITEBACK_EVENT(writeback_wake_background); +DEFINE_WRITEBACK_EVENT(writeback_bdi_register); + +DECLARE_EVENT_CLASS(wbc_class, + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), + TP_ARGS(wbc, bdi), + TP_STRUCT__entry( + __array(char, name, 32) + __field(long, nr_to_write) + __field(long, pages_skipped) + __field(int, sync_mode) + __field(int, for_kupdate) + __field(int, for_background) + __field(int, for_reclaim) + __field(int, range_cyclic) + __field(long, range_start) + __field(long, range_end) + ), + + TP_fast_assign( + strncpy(__entry->name, dev_name(bdi->dev), 32); + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->sync_mode = wbc->sync_mode; + __entry->for_kupdate = wbc->for_kupdate; + __entry->for_background = wbc->for_background; + __entry->for_reclaim = wbc->for_reclaim; + __entry->range_cyclic = wbc->range_cyclic; + __entry->range_start = (long)wbc->range_start; + __entry->range_end = (long)wbc->range_end; + ), + + TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " + "bgrd=%d reclm=%d cyclic=%d " + "start=0x%lx end=0x%lx", + __entry->name, + __entry->nr_to_write, + __entry->pages_skipped, + __entry->sync_mode, + __entry->for_kupdate, + __entry->for_background, + __entry->for_reclaim, + __entry->range_cyclic, + __entry->range_start, + __entry->range_end) +) + +#define DEFINE_WBC_EVENT(name) \ +DEFINE_EVENT(wbc_class, name, \ + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \ + TP_ARGS(wbc, bdi)) +DEFINE_WBC_EVENT(wbc_writepage); + +TRACE_EVENT(writeback_queue_io, + TP_PROTO(struct bdi_writeback *wb, + struct wb_writeback_work *work, + int moved), + TP_ARGS(wb, work, moved), + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, older) + __field(long, age) + __field(int, 
moved) + __field(int, reason) + ), + TP_fast_assign( + unsigned long *older_than_this = work->older_than_this; + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + __entry->older = older_than_this ? *older_than_this : 0; + __entry->age = older_than_this ? + (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->moved = moved; + __entry->reason = work->reason; + ), + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s", + __entry->name, + __entry->older, /* older_than_this in jiffies */ + __entry->age, /* older_than_this in relative milliseconds */ + __entry->moved, + __print_symbolic(__entry->reason, WB_WORK_REASON) + ) +); + +TRACE_EVENT(global_dirty_state, + + TP_PROTO(unsigned long background_thresh, + unsigned long dirty_thresh + ), + + TP_ARGS(background_thresh, + dirty_thresh + ), + + TP_STRUCT__entry( + __field(unsigned long, nr_dirty) + __field(unsigned long, nr_writeback) + __field(unsigned long, nr_unstable) + __field(unsigned long, background_thresh) + __field(unsigned long, dirty_thresh) + __field(unsigned long, dirty_limit) + __field(unsigned long, nr_dirtied) + __field(unsigned long, nr_written) + ), + + TP_fast_assign( + __entry->nr_dirty = global_page_state(NR_FILE_DIRTY); + __entry->nr_writeback = global_page_state(NR_WRITEBACK); + __entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS); + __entry->nr_dirtied = global_page_state(NR_DIRTIED); + __entry->nr_written = global_page_state(NR_WRITTEN); + __entry->background_thresh = background_thresh; + __entry->dirty_thresh = dirty_thresh; + __entry->dirty_limit = global_dirty_limit; + ), + + TP_printk("dirty=%lu writeback=%lu unstable=%lu " + "bg_thresh=%lu thresh=%lu limit=%lu " + "dirtied=%lu written=%lu", + __entry->nr_dirty, + __entry->nr_writeback, + __entry->nr_unstable, + __entry->background_thresh, + __entry->dirty_thresh, + __entry->dirty_limit, + __entry->nr_dirtied, + __entry->nr_written + ) +); + +#define KBps(x) ((x) << (PAGE_SHIFT - 10)) + +TRACE_EVENT(bdi_dirty_ratelimit, + + TP_PROTO(struct backing_dev_info *bdi, + unsigned long dirty_rate, + unsigned long task_ratelimit), + + TP_ARGS(bdi, dirty_rate, task_ratelimit), + + TP_STRUCT__entry( + __array(char, bdi, 32) + __field(unsigned long, write_bw) + __field(unsigned long, avg_write_bw) + __field(unsigned long, dirty_rate) + __field(unsigned long, dirty_ratelimit) + __field(unsigned long, task_ratelimit) + __field(unsigned long, balanced_dirty_ratelimit) + ), + + TP_fast_assign( + strlcpy(__entry->bdi, dev_name(bdi->dev), 32); + __entry->write_bw = KBps(bdi->write_bandwidth); + __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth); + __entry->dirty_rate = KBps(dirty_rate); + __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit); + __entry->task_ratelimit = KBps(task_ratelimit); + __entry->balanced_dirty_ratelimit = + KBps(bdi->balanced_dirty_ratelimit); + ), + + TP_printk("bdi %s: " + "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " + "balanced_dirty_ratelimit=%lu", + __entry->bdi, + __entry->write_bw, /* write bandwidth */ + __entry->avg_write_bw, /* avg write bandwidth */ + __entry->dirty_rate, /* bdi dirty rate */ + __entry->dirty_ratelimit, /* base ratelimit */ + __entry->task_ratelimit, /* ratelimit with position control */ + __entry->balanced_dirty_ratelimit /* the balanced ratelimit */ + ) +); + +TRACE_EVENT(balance_dirty_pages, + + TP_PROTO(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long bg_thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + 
unsigned long dirty_ratelimit, + unsigned long task_ratelimit, + unsigned long dirtied, + unsigned long period, + long pause, + unsigned long start_time), + + TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, + dirty_ratelimit, task_ratelimit, + dirtied, period, pause, start_time), + + TP_STRUCT__entry( + __array( char, bdi, 32) + __field(unsigned long, limit) + __field(unsigned long, setpoint) + __field(unsigned long, dirty) + __field(unsigned long, bdi_setpoint) + __field(unsigned long, bdi_dirty) + __field(unsigned long, dirty_ratelimit) + __field(unsigned long, task_ratelimit) + __field(unsigned int, dirtied) + __field(unsigned int, dirtied_pause) + __field(unsigned long, paused) + __field( long, pause) + __field(unsigned long, period) + __field( long, think) + ), + + TP_fast_assign( + unsigned long freerun = (thresh + bg_thresh) / 2; + strlcpy(__entry->bdi, dev_name(bdi->dev), 32); + + __entry->limit = global_dirty_limit; + __entry->setpoint = (global_dirty_limit + freerun) / 2; + __entry->dirty = dirty; + __entry->bdi_setpoint = __entry->setpoint * + bdi_thresh / (thresh + 1); + __entry->bdi_dirty = bdi_dirty; + __entry->dirty_ratelimit = KBps(dirty_ratelimit); + __entry->task_ratelimit = KBps(task_ratelimit); + __entry->dirtied = dirtied; + __entry->dirtied_pause = current->nr_dirtied_pause; + __entry->think = current->dirty_paused_when == 0 ? 0 : + (long)(jiffies - current->dirty_paused_when) * 1000/HZ; + __entry->period = period * 1000 / HZ; + __entry->pause = pause * 1000 / HZ; + __entry->paused = (jiffies - start_time) * 1000 / HZ; + ), + + + TP_printk("bdi %s: " + "limit=%lu setpoint=%lu dirty=%lu " + "bdi_setpoint=%lu bdi_dirty=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " + "dirtied=%u dirtied_pause=%u " + "paused=%lu pause=%ld period=%lu think=%ld", + __entry->bdi, + __entry->limit, + __entry->setpoint, + __entry->dirty, + __entry->bdi_setpoint, + __entry->bdi_dirty, + __entry->dirty_ratelimit, + __entry->task_ratelimit, + __entry->dirtied, + __entry->dirtied_pause, + __entry->paused, /* ms */ + __entry->pause, /* ms */ + __entry->period, /* ms */ + __entry->think /* ms */ + ) +); + +TRACE_EVENT(writeback_sb_inodes_requeue, + + TP_PROTO(struct inode *inode), + TP_ARGS(inode), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned long, state) + __field(unsigned long, dirtied_when) + ), + + TP_fast_assign( + strncpy(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; + ), + + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), + __entry->dirtied_when, + (jiffies - __entry->dirtied_when) / HZ + ) +); + +DECLARE_EVENT_CLASS(writeback_congest_waited_template, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed), + + TP_STRUCT__entry( + __field( unsigned int, usec_timeout ) + __field( unsigned int, usec_delayed ) + ), + + TP_fast_assign( + __entry->usec_timeout = usec_timeout; + __entry->usec_delayed = usec_delayed; + ), + + TP_printk("usec_timeout=%u usec_delayed=%u", + __entry->usec_timeout, + __entry->usec_delayed) +); + +DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed) +); + +DEFINE_EVENT(writeback_congest_waited_template, 
writeback_wait_iff_congested, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed) +); + +DECLARE_EVENT_CLASS(writeback_single_inode_template, + + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write + ), + + TP_ARGS(inode, wbc, nr_to_write), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned long, state) + __field(unsigned long, dirtied_when) + __field(unsigned long, writeback_index) + __field(long, nr_to_write) + __field(unsigned long, wrote) + ), + + TP_fast_assign( + strncpy(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->nr_to_write = nr_to_write; + __entry->wrote = nr_to_write - wbc->nr_to_write; + ), + + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " + "index=%lu to_write=%ld wrote=%lu", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), + __entry->dirtied_when, + (jiffies - __entry->dirtied_when) / HZ, + __entry->writeback_index, + __entry->nr_to_write, + __entry->wrote + ) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start, + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write), + TP_ARGS(inode, wbc, nr_to_write) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write), + TP_ARGS(inode, wbc, nr_to_write) +); + +DECLARE_EVENT_CLASS(writeback_lazytime_template, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field(unsigned long, ino ) + __field(unsigned long, state ) + __field( __u16, mode ) + __field(unsigned long, dirtied_when ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->mode = inode->i_mode; + __entry->dirtied_when = inode->dirtied_when; + ), + + TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, __entry->dirtied_when, + show_inode_state(__entry->state), __entry->mode) +); + +DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +#endif /* _TRACE_WRITEBACK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/events/xen.h b/kernel/include/trace/events/xen.h new file mode 100644 index 000000000..bce990f5a --- /dev/null +++ b/kernel/include/trace/events/xen.h @@ -0,0 +1,516 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xen + +#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XEN_H + +#include <linux/tracepoint.h> +#include <asm/paravirt_types.h> +#include <asm/xen/trace_types.h> + +struct multicall_entry; + +/* Multicalls */ +DECLARE_EVENT_CLASS(xen_mc__batch, + TP_PROTO(enum paravirt_lazy_mode mode), + TP_ARGS(mode), + TP_STRUCT__entry( + __field(enum paravirt_lazy_mode, mode) + ), + 
TP_fast_assign(__entry->mode = mode), + TP_printk("start batch LAZY_%s", + (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" : + (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE") + ); +#define DEFINE_XEN_MC_BATCH(name) \ + DEFINE_EVENT(xen_mc__batch, name, \ + TP_PROTO(enum paravirt_lazy_mode mode), \ + TP_ARGS(mode)) + +DEFINE_XEN_MC_BATCH(xen_mc_batch); +DEFINE_XEN_MC_BATCH(xen_mc_issue); + +TRACE_EVENT(xen_mc_entry, + TP_PROTO(struct multicall_entry *mc, unsigned nargs), + TP_ARGS(mc, nargs), + TP_STRUCT__entry( + __field(unsigned int, op) + __field(unsigned int, nargs) + __array(unsigned long, args, 6) + ), + TP_fast_assign(__entry->op = mc->op; + __entry->nargs = nargs; + memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs); + memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs)); + ), + TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]", + __entry->op, xen_hypercall_name(__entry->op), + __entry->args[0], __entry->args[1], __entry->args[2], + __entry->args[3], __entry->args[4], __entry->args[5]) + ); + +TRACE_EVENT(xen_mc_entry_alloc, + TP_PROTO(size_t args), + TP_ARGS(args), + TP_STRUCT__entry( + __field(size_t, args) + ), + TP_fast_assign(__entry->args = args), + TP_printk("alloc entry %zu arg bytes", __entry->args) + ); + +TRACE_EVENT(xen_mc_callback, + TP_PROTO(xen_mc_callback_fn_t fn, void *data), + TP_ARGS(fn, data), + TP_STRUCT__entry( + __field(xen_mc_callback_fn_t, fn) + __field(void *, data) + ), + TP_fast_assign( + __entry->fn = fn; + __entry->data = data; + ), + TP_printk("callback %pf, data %p", + __entry->fn, __entry->data) + ); + +TRACE_EVENT(xen_mc_flush_reason, + TP_PROTO(enum xen_mc_flush_reason reason), + TP_ARGS(reason), + TP_STRUCT__entry( + __field(enum xen_mc_flush_reason, reason) + ), + TP_fast_assign(__entry->reason = reason), + TP_printk("flush reason %s", + (__entry->reason == XEN_MC_FL_NONE) ? "NONE" : + (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" : + (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" : + (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??") + ); + +TRACE_EVENT(xen_mc_flush, + TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx), + TP_ARGS(mcidx, argidx, cbidx), + TP_STRUCT__entry( + __field(unsigned, mcidx) + __field(unsigned, argidx) + __field(unsigned, cbidx) + ), + TP_fast_assign(__entry->mcidx = mcidx; + __entry->argidx = argidx; + __entry->cbidx = cbidx), + TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks", + __entry->mcidx, __entry->argidx, __entry->cbidx) + ); + +TRACE_EVENT(xen_mc_extend_args, + TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res), + TP_ARGS(op, args, res), + TP_STRUCT__entry( + __field(unsigned int, op) + __field(size_t, args) + __field(enum xen_mc_extend_args, res) + ), + TP_fast_assign(__entry->op = op; + __entry->args = args; + __entry->res = res), + TP_printk("extending op %u%s by %zu bytes res %s", + __entry->op, xen_hypercall_name(__entry->op), + __entry->args, + __entry->res == XEN_MC_XE_OK ? "OK" : + __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" : + __entry->res == XEN_MC_XE_NO_SPACE ? 
"NO_SPACE" : "???") + ); + +/* mmu */ +DECLARE_EVENT_CLASS(xen_mmu__set_pte, + TP_PROTO(pte_t *ptep, pte_t pteval), + TP_ARGS(ptep, pteval), + TP_STRUCT__entry( + __field(pte_t *, ptep) + __field(pteval_t, pteval) + ), + TP_fast_assign(__entry->ptep = ptep; + __entry->pteval = pteval.pte), + TP_printk("ptep %p pteval %0*llx (raw %0*llx)", + __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) + ); + +#define DEFINE_XEN_MMU_SET_PTE(name) \ + DEFINE_EVENT(xen_mmu__set_pte, name, \ + TP_PROTO(pte_t *ptep, pte_t pteval), \ + TP_ARGS(ptep, pteval)) + +DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte); +DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic); + +TRACE_EVENT(xen_mmu_set_domain_pte, + TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid), + TP_ARGS(ptep, pteval, domid), + TP_STRUCT__entry( + __field(pte_t *, ptep) + __field(pteval_t, pteval) + __field(unsigned, domid) + ), + TP_fast_assign(__entry->ptep = ptep; + __entry->pteval = pteval.pte; + __entry->domid = domid), + TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u", + __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval, + __entry->domid) + ); + +TRACE_EVENT(xen_mmu_set_pte_at, + TP_PROTO(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval), + TP_ARGS(mm, addr, ptep, pteval), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(pte_t *, ptep) + __field(pteval_t, pteval) + ), + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->ptep = ptep; + __entry->pteval = pteval.pte), + TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", + __entry->mm, __entry->addr, __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) + ); + +TRACE_EVENT(xen_mmu_pte_clear, + TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep), + TP_ARGS(mm, addr, ptep), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(pte_t *, ptep) + ), + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->ptep = ptep), + TP_printk("mm %p addr %lx ptep %p", + __entry->mm, __entry->addr, __entry->ptep) + ); + +TRACE_EVENT(xen_mmu_set_pmd, + TP_PROTO(pmd_t *pmdp, pmd_t pmdval), + TP_ARGS(pmdp, pmdval), + TP_STRUCT__entry( + __field(pmd_t *, pmdp) + __field(pmdval_t, pmdval) + ), + TP_fast_assign(__entry->pmdp = pmdp; + __entry->pmdval = pmdval.pmd), + TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)", + __entry->pmdp, + (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)), + (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval) + ); + +TRACE_EVENT(xen_mmu_pmd_clear, + TP_PROTO(pmd_t *pmdp), + TP_ARGS(pmdp), + TP_STRUCT__entry( + __field(pmd_t *, pmdp) + ), + TP_fast_assign(__entry->pmdp = pmdp), + TP_printk("pmdp %p", __entry->pmdp) + ); + +#if CONFIG_PGTABLE_LEVELS >= 4 + +TRACE_EVENT(xen_mmu_set_pud, + TP_PROTO(pud_t *pudp, pud_t pudval), + TP_ARGS(pudp, pudval), + TP_STRUCT__entry( + __field(pud_t *, pudp) + __field(pudval_t, pudval) + ), + TP_fast_assign(__entry->pudp = pudp; + __entry->pudval = native_pud_val(pudval)), + TP_printk("pudp %p pudval %0*llx (raw %0*llx)", + __entry->pudp, + (int)sizeof(pudval_t) * 2, (unsigned long 
long)pud_val(native_make_pud(__entry->pudval)), + (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval) + ); + +TRACE_EVENT(xen_mmu_set_pgd, + TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval), + TP_ARGS(pgdp, user_pgdp, pgdval), + TP_STRUCT__entry( + __field(pgd_t *, pgdp) + __field(pgd_t *, user_pgdp) + __field(pgdval_t, pgdval) + ), + TP_fast_assign(__entry->pgdp = pgdp; + __entry->user_pgdp = user_pgdp; + __entry->pgdval = pgdval.pgd), + TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)", + __entry->pgdp, __entry->user_pgdp, + (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)), + (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval) + ); + +TRACE_EVENT(xen_mmu_pud_clear, + TP_PROTO(pud_t *pudp), + TP_ARGS(pudp), + TP_STRUCT__entry( + __field(pud_t *, pudp) + ), + TP_fast_assign(__entry->pudp = pudp), + TP_printk("pudp %p", __entry->pudp) + ); +#else + +TRACE_EVENT(xen_mmu_set_pud, + TP_PROTO(pud_t *pudp, pud_t pudval), + TP_ARGS(pudp, pudval), + TP_STRUCT__entry( + __field(pud_t *, pudp) + __field(pudval_t, pudval) + ), + TP_fast_assign(__entry->pudp = pudp; + __entry->pudval = native_pud_val(pudval)), + TP_printk("pudp %p pudval %0*llx (raw %0*llx)", + __entry->pudp, + (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)), + (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval) + ); + +#endif + +TRACE_EVENT(xen_mmu_pgd_clear, + TP_PROTO(pgd_t *pgdp), + TP_ARGS(pgdp), + TP_STRUCT__entry( + __field(pgd_t *, pgdp) + ), + TP_fast_assign(__entry->pgdp = pgdp), + TP_printk("pgdp %p", __entry->pgdp) + ); + +DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot, + TP_PROTO(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval), + TP_ARGS(mm, addr, ptep, pteval), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(pte_t *, ptep) + __field(pteval_t, pteval) + ), + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->ptep = ptep; + __entry->pteval = pteval.pte), + TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", + __entry->mm, __entry->addr, __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) + ); +#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name) \ + DEFINE_EVENT(xen_mmu_ptep_modify_prot, name, \ + TP_PROTO(struct mm_struct *mm, unsigned long addr, \ + pte_t *ptep, pte_t pteval), \ + TP_ARGS(mm, addr, ptep, pteval)) + +DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start); +DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit); + +TRACE_EVENT(xen_mmu_alloc_ptpage, + TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned), + TP_ARGS(mm, pfn, level, pinned), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, pfn) + __field(unsigned, level) + __field(bool, pinned) + ), + TP_fast_assign(__entry->mm = mm; + __entry->pfn = pfn; + __entry->level = level; + __entry->pinned = pinned), + TP_printk("mm %p pfn %lx level %d %spinned", + __entry->mm, __entry->pfn, __entry->level, + __entry->pinned ? 
"" : "un") + ); + +TRACE_EVENT(xen_mmu_release_ptpage, + TP_PROTO(unsigned long pfn, unsigned level, bool pinned), + TP_ARGS(pfn, level, pinned), + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned, level) + __field(bool, pinned) + ), + TP_fast_assign(__entry->pfn = pfn; + __entry->level = level; + __entry->pinned = pinned), + TP_printk("pfn %lx level %d %spinned", + __entry->pfn, __entry->level, + __entry->pinned ? "" : "un") + ); + +DECLARE_EVENT_CLASS(xen_mmu_pgd, + TP_PROTO(struct mm_struct *mm, pgd_t *pgd), + TP_ARGS(mm, pgd), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(pgd_t *, pgd) + ), + TP_fast_assign(__entry->mm = mm; + __entry->pgd = pgd), + TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd) + ); +#define DEFINE_XEN_MMU_PGD_EVENT(name) \ + DEFINE_EVENT(xen_mmu_pgd, name, \ + TP_PROTO(struct mm_struct *mm, pgd_t *pgd), \ + TP_ARGS(mm, pgd)) + +DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); +DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); + +TRACE_EVENT(xen_mmu_flush_tlb_all, + TP_PROTO(int x), + TP_ARGS(x), + TP_STRUCT__entry(__array(char, x, 0)), + TP_fast_assign((void)x), + TP_printk("%s", "") + ); + +TRACE_EVENT(xen_mmu_flush_tlb, + TP_PROTO(int x), + TP_ARGS(x), + TP_STRUCT__entry(__array(char, x, 0)), + TP_fast_assign((void)x), + TP_printk("%s", "") + ); + +TRACE_EVENT(xen_mmu_flush_tlb_single, + TP_PROTO(unsigned long addr), + TP_ARGS(addr), + TP_STRUCT__entry( + __field(unsigned long, addr) + ), + TP_fast_assign(__entry->addr = addr), + TP_printk("addr %lx", __entry->addr) + ); + +TRACE_EVENT(xen_mmu_flush_tlb_others, + TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm, + unsigned long addr, unsigned long end), + TP_ARGS(cpus, mm, addr, end), + TP_STRUCT__entry( + __field(unsigned, ncpus) + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(unsigned long, end) + ), + TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); + __entry->mm = mm; + __entry->addr = addr, + __entry->end = end), + TP_printk("ncpus %d mm %p addr %lx, end %lx", + __entry->ncpus, __entry->mm, __entry->addr, __entry->end) + ); + +TRACE_EVENT(xen_mmu_write_cr3, + TP_PROTO(bool kernel, unsigned long cr3), + TP_ARGS(kernel, cr3), + TP_STRUCT__entry( + __field(bool, kernel) + __field(unsigned long, cr3) + ), + TP_fast_assign(__entry->kernel = kernel; + __entry->cr3 = cr3), + TP_printk("%s cr3 %lx", + __entry->kernel ? 
"kernel" : "user", __entry->cr3) + ); + + +/* CPU */ +TRACE_EVENT(xen_cpu_write_ldt_entry, + TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc), + TP_ARGS(dt, entrynum, desc), + TP_STRUCT__entry( + __field(struct desc_struct *, dt) + __field(int, entrynum) + __field(u64, desc) + ), + TP_fast_assign(__entry->dt = dt; + __entry->entrynum = entrynum; + __entry->desc = desc; + ), + TP_printk("dt %p entrynum %d entry %016llx", + __entry->dt, __entry->entrynum, + (unsigned long long)__entry->desc) + ); + +TRACE_EVENT(xen_cpu_write_idt_entry, + TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent), + TP_ARGS(dt, entrynum, ent), + TP_STRUCT__entry( + __field(gate_desc *, dt) + __field(int, entrynum) + ), + TP_fast_assign(__entry->dt = dt; + __entry->entrynum = entrynum; + ), + TP_printk("dt %p entrynum %d", + __entry->dt, __entry->entrynum) + ); + +TRACE_EVENT(xen_cpu_load_idt, + TP_PROTO(const struct desc_ptr *desc), + TP_ARGS(desc), + TP_STRUCT__entry( + __field(unsigned long, addr) + ), + TP_fast_assign(__entry->addr = desc->address), + TP_printk("addr %lx", __entry->addr) + ); + +TRACE_EVENT(xen_cpu_write_gdt_entry, + TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type), + TP_ARGS(dt, entrynum, desc, type), + TP_STRUCT__entry( + __field(u64, desc) + __field(struct desc_struct *, dt) + __field(int, entrynum) + __field(int, type) + ), + TP_fast_assign(__entry->dt = dt; + __entry->entrynum = entrynum; + __entry->desc = *(u64 *)desc; + __entry->type = type; + ), + TP_printk("dt %p entrynum %d type %d desc %016llx", + __entry->dt, __entry->entrynum, __entry->type, + (unsigned long long)__entry->desc) + ); + +TRACE_EVENT(xen_cpu_set_ldt, + TP_PROTO(const void *addr, unsigned entries), + TP_ARGS(addr, entries), + TP_STRUCT__entry( + __field(const void *, addr) + __field(unsigned, entries) + ), + TP_fast_assign(__entry->addr = addr; + __entry->entries = entries), + TP_printk("addr %p entries %u", + __entry->addr, __entry->entries) + ); + + +#endif /* _TRACE_XEN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/include/trace/ftrace.h b/kernel/include/trace/ftrace.h new file mode 100644 index 000000000..37d4b10b1 --- /dev/null +++ b/kernel/include/trace/ftrace.h @@ -0,0 +1,859 @@ +/* + * Stage 1 of the trace events. + * + * Override the macros in <trace/trace_events.h> to include the following: + * + * struct ftrace_raw_<call> { + * struct trace_entry ent; + * <type> <item>; + * <type2> <item2>[<len>]; + * [...] + * }; + * + * The <type> <item> is created by the __field(type, item) macro or + * the __array(type2, item2, len) macro. + * We simply do "type item;", and that will create the fields + * in the structure. 
+ */
+
+#include <linux/ftrace_event.h>
+
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR()				\
+	static const char TRACE_SYSTEM_STRING[] =	\
+		__stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)				\
+	static struct trace_enum_map __used __initdata	\
+	__##TRACE_SYSTEM##_##a =			\
+	{						\
+		.system = TRACE_SYSTEM_STRING,		\
+		.enum_string = #a,			\
+		.enum_value = a				\
+	};						\
+	static struct trace_enum_map __used		\
+	__attribute__((section("_ftrace_enum_map")))	\
+	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
+/*
+ * DECLARE_EVENT_CLASS can be used to add generic function
+ * handlers for events. That is, if all events have the same
+ * parameters and just have distinct trace points.
+ * Each tracepoint can be defined with DEFINE_EVENT and that
+ * will map the DECLARE_EVENT_CLASS to the tracepoint.
+ *
+ * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
+ */
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+	DECLARE_EVENT_CLASS(name,			       \
+			     PARAMS(proto),		       \
+			     PARAMS(args),		       \
+			     PARAMS(tstruct),		       \
+			     PARAMS(assign),		       \
+			     PARAMS(print));		       \
+	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+
+#undef __field
+#define __field(type, item)		type	item;
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)	type	item;
+
+#undef __field_struct
+#define __field_struct(type, item)	type	item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)	type	item;
+
+#undef __array
+#define __array(type, item, len)	type	item[len];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
+	struct ftrace_raw_##name {					\
+		struct trace_entry	ent;				\
+		tstruct							\
+		char			__data[0];			\
+	};								\
+									\
+	static struct ftrace_event_class event_class_##name;
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)	\
+	static struct ftrace_event_call	__used		\
+	__attribute__((__aligned__(4))) event_##name
+
+#undef DEFINE_EVENT_FN
+#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+/* Callbacks are meaningless to ftrace. */
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
+		assign, print, reg, unreg)			\
+	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
+		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\
+
+#undef TRACE_EVENT_FLAGS
+#define TRACE_EVENT_FLAGS(name, value)				\
+	__TRACE_EVENT_FLAGS(name, value)
+
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(name, expr...)			\
+	__TRACE_EVENT_PERF_PERM(name, expr)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 2 of the trace events.
+ *
+ * Include the following:
+ *
+ * struct ftrace_data_offsets_<call> {
+ *	u32				<item1>;
+ *	u32				<item2>;
+ *	[...]
+ * }; + * + * The __dynamic_array() macro will create each u32 <item>, this is + * to keep the offset of each array from the beginning of the event. + * The size of an array is also encoded, in the higher 16 bits of <item>. + */ + +#undef TRACE_DEFINE_ENUM +#define TRACE_DEFINE_ENUM(a) + +#undef __field +#define __field(type, item) + +#undef __field_ext +#define __field_ext(type, item, filter_type) + +#undef __field_struct +#define __field_struct(type, item) + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) + +#undef __array +#define __array(type, item, len) + +#undef __dynamic_array +#define __dynamic_array(type, item, len) u32 item; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + struct ftrace_data_offsets_##call { \ + tstruct; \ + }; + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#undef TRACE_EVENT_FLAGS +#define TRACE_EVENT_FLAGS(event, flag) + +#undef TRACE_EVENT_PERF_PERM +#define TRACE_EVENT_PERF_PERM(event, expr...) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* + * Stage 3 of the trace events. + * + * Override the macros in <trace/trace_events.h> to include the following: + * + * enum print_line_t + * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) + * { + * struct trace_seq *s = &iter->seq; + * struct ftrace_raw_<call> *field; <-- defined in stage 1 + * struct trace_entry *entry; + * struct trace_seq *p = &iter->tmp_seq; + * int ret; + * + * entry = iter->ent; + * + * if (entry->type != event_<call>->event.type) { + * WARN_ON_ONCE(1); + * return TRACE_TYPE_UNHANDLED; + * } + * + * field = (typeof(field))entry; + * + * trace_seq_init(p); + * ret = trace_seq_printf(s, "%s: ", <call>); + * if (ret) + * ret = trace_seq_printf(s, <TP_printk> "\n"); + * if (!ret) + * return TRACE_TYPE_PARTIAL_LINE; + * + * return TRACE_TYPE_HANDLED; + * } + * + * This is the method used to print the raw event to the trace + * output format. Note, this is not needed if the data is read + * in binary. + */ + +#undef __entry +#define __entry field + +#undef TP_printk +#define TP_printk(fmt, args...) fmt "\n", args + +#undef __get_dynamic_array +#define __get_dynamic_array(field) \ + ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) + +#undef __get_dynamic_array_len +#define __get_dynamic_array_len(field) \ + ((__entry->__data_loc_##field >> 16) & 0xffff) + +#undef __get_str +#define __get_str(field) (char *)__get_dynamic_array(field) + +#undef __get_bitmask +#define __get_bitmask(field) \ + ({ \ + void *__bitmask = __get_dynamic_array(field); \ + unsigned int __bitmask_size; \ + __bitmask_size = __get_dynamic_array_len(field); \ + ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ + }) + +#undef __print_flags +#define __print_flags(flag, delim, flag_array...) \ + ({ \ + static const struct trace_print_flags __flags[] = \ + { flag_array, { -1, NULL }}; \ + ftrace_print_flags_seq(p, delim, flag, __flags); \ + }) + +#undef __print_symbolic +#define __print_symbolic(value, symbol_array...) 
\ + ({ \ + static const struct trace_print_flags symbols[] = \ + { symbol_array, { -1, NULL }}; \ + ftrace_print_symbols_seq(p, value, symbols); \ + }) + +#undef __print_symbolic_u64 +#if BITS_PER_LONG == 32 +#define __print_symbolic_u64(value, symbol_array...) \ + ({ \ + static const struct trace_print_flags_u64 symbols[] = \ + { symbol_array, { -1, NULL } }; \ + ftrace_print_symbols_seq_u64(p, value, symbols); \ + }) +#else +#define __print_symbolic_u64(value, symbol_array...) \ + __print_symbolic(value, symbol_array) +#endif + +#undef __print_hex +#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) + +#undef __print_array +#define __print_array(array, count, el_size) \ + ({ \ + BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ + el_size != 4 && el_size != 8); \ + ftrace_print_array_seq(p, array, count, el_size); \ + }) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static notrace enum print_line_t \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *trace_event) \ +{ \ + struct trace_seq *s = &iter->seq; \ + struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ + struct ftrace_raw_##call *field; \ + int ret; \ + \ + field = (typeof(field))iter->ent; \ + \ + ret = ftrace_raw_output_prep(iter, trace_event); \ + if (ret != TRACE_TYPE_HANDLED) \ + return ret; \ + \ + trace_seq_printf(s, print); \ + \ + return trace_handle_return(s); \ +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ +static notrace enum print_line_t \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *event) \ +{ \ + struct ftrace_raw_##template *field; \ + struct trace_entry *entry; \ + struct trace_seq *p = &iter->tmp_seq; \ + \ + entry = iter->ent; \ + \ + if (entry->type != event_##call.event.type) { \ + WARN_ON_ONCE(1); \ + return TRACE_TYPE_UNHANDLED; \ + } \ + \ + field = (typeof(field))entry; \ + \ + trace_seq_init(p); \ + return ftrace_output_call(iter, #call, print); \ +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef __field_ext +#define __field_ext(type, item, filter_type) \ + ret = trace_define_field(event_call, #type, #item, \ + offsetof(typeof(field), item), \ + sizeof(field.item), \ + is_signed_type(type), filter_type); \ + if (ret) \ + return ret; + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) \ + ret = trace_define_field(event_call, #type, #item, \ + offsetof(typeof(field), item), \ + sizeof(field.item), \ + 0, filter_type); \ + if (ret) \ + return ret; + +#undef __field +#define __field(type, item) __field_ext(type, item, FILTER_OTHER) + +#undef __field_struct +#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) + +#undef __array +#define __array(type, item, len) \ + do { \ + char *type_str = #type"["__stringify(len)"]"; \ + BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ + ret = trace_define_field(event_call, type_str, #item, \ + offsetof(typeof(field), item), \ + sizeof(field.item), \ + is_signed_type(type), FILTER_OTHER); \ + if (ret) \ + return ret; \ + } while (0); + +#undef __dynamic_array +#define __dynamic_array(type, item, len) \ + ret = trace_define_field(event_call, "__data_loc " #type "[]", 
#item, \ + offsetof(typeof(field), __data_loc_##item), \ + sizeof(field.__data_loc_##item), \ + is_signed_type(type), FILTER_OTHER); + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ +static int notrace __init \ +ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ +{ \ + struct ftrace_raw_##call field; \ + int ret; \ + \ + tstruct; \ + \ + return ret; \ +} + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* + * remember the offset of each array from the beginning of the event. + */ + +#undef __entry +#define __entry entry + +#undef __field +#define __field(type, item) + +#undef __field_ext +#define __field_ext(type, item, filter_type) + +#undef __field_struct +#define __field_struct(type, item) + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) + +#undef __array +#define __array(type, item, len) + +#undef __dynamic_array +#define __dynamic_array(type, item, len) \ + __item_length = (len) * sizeof(type); \ + __data_offsets->item = __data_size + \ + offsetof(typeof(*entry), __data); \ + __data_offsets->item |= __item_length << 16; \ + __data_size += __item_length; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, \ + strlen((src) ? (const char *)(src) : "(null)") + 1) + +/* + * __bitmask_size_in_bytes_raw is the number of bytes needed to hold + * num_possible_cpus(). + */ +#define __bitmask_size_in_bytes_raw(nr_bits) \ + (((nr_bits) + 7) / 8) + +#define __bitmask_size_in_longs(nr_bits) \ + ((__bitmask_size_in_bytes_raw(nr_bits) + \ + ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8)) + +/* + * __bitmask_size_in_bytes is the number of bytes needed to hold + * num_possible_cpus() padded out to the nearest long. This is what + * is saved in the buffer, just to be consistent. + */ +#define __bitmask_size_in_bytes(nr_bits) \ + (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8)) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ + __bitmask_size_in_longs(nr_bits)) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static inline notrace int ftrace_get_offsets_##call( \ + struct ftrace_data_offsets_##call *__data_offsets, proto) \ +{ \ + int __data_size = 0; \ + int __maybe_unused __item_length; \ + struct ftrace_raw_##call __maybe_unused *entry; \ + \ + tstruct; \ + \ + return __data_size; \ +} + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* + * Stage 4 of the trace events. 
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct ftrace_event_call event_<call>;
+ *
+ * static void ftrace_raw_event_<call>(void *__data, proto)
+ * {
+ *	struct ftrace_event_file *ftrace_file = __data;
+ *	struct ftrace_event_call *event_call = ftrace_file->event_call;
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = ftrace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
+ *	struct ring_buffer_event *event;
+ *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
+ *	unsigned long irq_flags;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+ *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ *			event_triggers_call(ftrace_file, NULL);
+ *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ *			return;
+ *	}
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ *				  event_<call>->event.type,
+ *				  sizeof(*entry) + __data_size,
+ *				  irq_flags, pc);
+ *	if (!event)
+ *		return;
+ *	entry = ring_buffer_event_data(event);
+ *
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
+ *			   __array macros.
+ *
+ *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(ftrace_file, entry);
+ *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(ftrace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ *	.trace			= ftrace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= ftrace_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= ftrace_raw_event_##call,
+ *	.reg			= ftrace_event_reg,
+ * };
+ *
+ * static struct ftrace_event_call event_<call> = {
+ *	.class			= event_class_<template>,
+ *	{
+ *		.tp			= &__tracepoint_<call>,
+ *	},
+ *	.event			= &ftrace_event_type_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
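+ * // (each such pointer is emitted into the "_ftrace_events" section;
+ * // the tracing core can then walk that section from start to end as
+ * // an ordinary array of ftrace_event_call pointers)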
+ * static struct ftrace_event_call __used + * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; + * + */ + +#ifdef CONFIG_PERF_EVENTS + +#define _TRACE_PERF_PROTO(call, proto) \ + static notrace void \ + perf_trace_##call(void *__data, proto); + +#define _TRACE_PERF_INIT(call) \ + .perf_probe = perf_trace_##call, + +#else +#define _TRACE_PERF_PROTO(call, proto) +#define _TRACE_PERF_INIT(call) +#endif /* CONFIG_PERF_EVENTS */ + +#undef __entry +#define __entry entry + +#undef __field +#define __field(type, item) + +#undef __field_struct +#define __field_struct(type, item) + +#undef __array +#define __array(type, item, len) + +#undef __dynamic_array +#define __dynamic_array(type, item, len) \ + __entry->__data_loc_##item = __data_offsets.item; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __assign_str +#define __assign_str(dst, src) \ + strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef __get_bitmask +#define __get_bitmask(field) (char *)__get_dynamic_array(field) + +#undef __assign_bitmask +#define __assign_bitmask(dst, src, nr_bits) \ + memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) + +#undef TP_fast_assign +#define TP_fast_assign(args...) args + +#undef __perf_addr +#define __perf_addr(a) (a) + +#undef __perf_count +#define __perf_count(c) (c) + +#undef __perf_task +#define __perf_task(t) (t) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + \ +static notrace void \ +ftrace_raw_event_##call(void *__data, proto) \ +{ \ + struct ftrace_event_file *ftrace_file = __data; \ + struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ + struct ftrace_event_buffer fbuffer; \ + struct ftrace_raw_##call *entry; \ + int __data_size; \ + \ + if (ftrace_trigger_soft_disabled(ftrace_file)) \ + return; \ + \ + __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ + \ + entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \ + sizeof(*entry) + __data_size); \ + \ + if (!entry) \ + return; \ + \ + tstruct \ + \ + { assign; } \ + \ + ftrace_event_buffer_commit(&fbuffer); \ +} +/* + * The ftrace_test_probe is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the ftrace probe will + * fail to compile unless it too is updated. + */ + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ +static inline void ftrace_test_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(ftrace_raw_event_##template); \ +} + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef __entry +#define __entry REC + +#undef __print_flags +#undef __print_symbolic +#undef __print_hex +#undef __get_dynamic_array +#undef __get_dynamic_array_len +#undef __get_str +#undef __get_bitmask +#undef __print_array + +#undef TP_printk +#define TP_printk(fmt, args...) 
"\"" fmt "\", " __stringify(args) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +_TRACE_PERF_PROTO(call, PARAMS(proto)); \ +static char print_fmt_##call[] = print; \ +static struct ftrace_event_class __used __refdata event_class_##call = { \ + .system = TRACE_SYSTEM_STRING, \ + .define_fields = ftrace_define_fields_##call, \ + .fields = LIST_HEAD_INIT(event_class_##call.fields),\ + .raw_init = trace_event_raw_init, \ + .probe = ftrace_raw_event_##call, \ + .reg = ftrace_event_reg, \ + _TRACE_PERF_INIT(call) \ +}; + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ + \ +static struct ftrace_event_call __used event_##call = { \ + .class = &event_class_##template, \ + { \ + .tp = &__tracepoint_##call, \ + }, \ + .event.funcs = &ftrace_event_type_funcs_##template, \ + .print_fmt = print_fmt_##template, \ + .flags = TRACE_EVENT_FL_TRACEPOINT, \ +}; \ +static struct ftrace_event_call __used \ +__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ + \ +static char print_fmt_##call[] = print; \ + \ +static struct ftrace_event_call __used event_##call = { \ + .class = &event_class_##template, \ + { \ + .tp = &__tracepoint_##call, \ + }, \ + .event.funcs = &ftrace_event_type_funcs_##call, \ + .print_fmt = print_fmt_##call, \ + .flags = TRACE_EVENT_FL_TRACEPOINT, \ +}; \ +static struct ftrace_event_call __used \ +__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef TRACE_SYSTEM_VAR + +#ifdef CONFIG_PERF_EVENTS + +#undef __entry +#define __entry entry + +#undef __get_dynamic_array +#define __get_dynamic_array(field) \ + ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) + +#undef __get_dynamic_array_len +#define __get_dynamic_array_len(field) \ + ((__entry->__data_loc_##field >> 16) & 0xffff) + +#undef __get_str +#define __get_str(field) (char *)__get_dynamic_array(field) + +#undef __get_bitmask +#define __get_bitmask(field) (char *)__get_dynamic_array(field) + +#undef __perf_addr +#define __perf_addr(a) (__addr = (a)) + +#undef __perf_count +#define __perf_count(c) (__count = (c)) + +#undef __perf_task +#define __perf_task(t) (__task = (t)) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static notrace void \ +perf_trace_##call(void *__data, proto) \ +{ \ + struct ftrace_event_call *event_call = __data; \ + struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ + struct ftrace_raw_##call *entry; \ + struct pt_regs *__regs; \ + u64 __addr = 0, __count = 1; \ + struct task_struct *__task = NULL; \ + struct hlist_head *head; \ + int __entry_size; \ + int __data_size; \ + int rctx; \ + \ + __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ + \ + head = this_cpu_ptr(event_call->perf_events); \ + if (__builtin_constant_p(!__task) && !__task && \ + hlist_empty(head)) \ + return; \ + \ + __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ + sizeof(u64)); \ + __entry_size -= sizeof(u32); \ + \ + entry = perf_trace_buf_prepare(__entry_size, \ + event_call->event.type, &__regs, &rctx); \ + if (!entry) \ + return; \ + \ + perf_fetch_caller_regs(__regs); \ + \ + tstruct \ + \ + { assign; } \ + \ + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ + __count, __regs, head, __task); \ +} + +/* + * This part is compiled out, it is 
only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
+static inline void perf_test_probe_##call(void)		\
+{								\
+	check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
+
diff --git a/kernel/include/trace/syscall.h b/kernel/include/trace/syscall.h
new file mode 100644
index 000000000..9674145e2
--- /dev/null
+++ b/kernel/include/trace/syscall.h
@@ -0,0 +1,50 @@
+#ifndef _TRACE_SYSCALL_H
+#define _TRACE_SYSCALL_H
+
+#include <linux/tracepoint.h>
+#include <linux/unistd.h>
+#include <linux/ftrace_event.h>
+#include <linux/thread_info.h>
+
+#include <asm/ptrace.h>
+
+
+/*
+ * A syscall entry in the ftrace syscalls array.
+ *
+ * @name: name of the syscall
+ * @syscall_nr: number of the syscall
+ * @nb_args: number of parameters it takes
+ * @types: list of types as strings
+ * @args: list of args as strings (args[i] matches types[i])
+ * @enter_fields: list of fields for syscall_enter trace event
+ * @enter_event: associated syscall_enter trace event
+ * @exit_event: associated syscall_exit trace event
+ */
+struct syscall_metadata {
+	const char	*name;
+	int		syscall_nr;
+	int		nb_args;
+	const char	**types;
+	const char	**args;
+	struct list_head enter_fields;
+
+	struct ftrace_event_call *enter_event;
+	struct ftrace_event_call *exit_event;
+};
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+	else
+		clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+}
+#else
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+}
+#endif
+
+#endif /* _TRACE_SYSCALL_H */
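The headers in this patch all follow the same usage pattern: an event header
under include/trace/events/ defines its events with TRACE_EVENT(), and exactly
one .c file in the subsystem defines CREATE_TRACE_POINTS before including that
header, so that define_trace.h instantiates the tracepoints and ftrace.h runs
its staged expansions over them. A minimal sketch of that pattern (the "sample"
system, its file name and the call site below are hypothetical, not part of
this diff):

/* include/trace/events/sample.h (hypothetical) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(
		__field(int, value)
	),
	TP_fast_assign(
		__entry->value = value;
	),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

/* In exactly one .c file of the subsystem (hypothetical): */
#define CREATE_TRACE_POINTS
#include <trace/events/sample.h>

/* At any call site (hypothetical): */
trace_sample_event(42);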