path: root/kernel/include/linux/context_tracking_state.h
blob: 6b7b96a32b753a614ef36ea8bb6c1d5a83641b21
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H

#include <linux/percpu.h>
#include <linux/static_key.h>

struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	enum ctx_state {
		CONTEXT_KERNEL = 0,
		CONTEXT_USER,
		CONTEXT_GUEST,
	} state;
};

#ifdef CONFIG_CONTEXT_TRACKING
extern struct static_key context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);

static inline bool context_tracking_is_enabled(void)
{
	return static_key_false(&context_tracking_enabled);
}

static inline bool context_tracking_cpu_is_enabled(void)
{
	return __this_cpu_read(context_tracking.active);
}

static inline bool context_tracking_in_user(void)
{
	return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
static inline bool context_tracking_is_enabled(void) { return false; }
static inline bool context_tracking_cpu_is_enabled(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */

#endif
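
For context, a minimal sketch of how these helpers are meant to be consumed; this is an assumption modeled on the fast-path pattern of the companion include/linux/context_tracking.h, not a copy of that file, and example_user_enter() is a hypothetical name.

/*
 * Usage sketch (not part of this header). The static key turns
 * context_tracking_is_enabled() into a patched no-op branch when tracking
 * is off, so the per-CPU reads are only reached on the slow path. The
 * real entry points also handle vtime and RCU work, elided here.
 */
static inline void example_user_enter(void)
{
	if (context_tracking_is_enabled() &&	/* patched-out when disabled */
	    context_tracking_cpu_is_enabled())	/* this CPU opted in */
		__this_cpu_write(context_tracking.state, CONTEXT_USER);
}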
path: root/kernel/include/linux/proportions.h (fragment)

/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd,
			  struct prop_local_percpu *pl,
			  long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}

/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
#endif

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)

void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);

/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd,
			  struct prop_local_single *pl,
			  long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}

#endif /* _LINUX_PROPORTIONS_H */
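
A hedged usage sketch of the percpu flavour declared above. struct prop_descriptor and prop_descriptor_init() are declared in the portion of this header not shown in the fragment; the init signature below, and all example_* names, are assumptions (the gfp_t parameter is inferred from prop_local_init_percpu()).

/*
 * Usage sketch (not part of this header): one "competitor" tracking its
 * share of a global event stream, as the old per-BDI writeout-completion
 * accounting did.
 */
static struct prop_descriptor global_events;	/* shared aging descriptor */
static struct prop_local_percpu my_events;	/* one competitor's counter */

static int example_init(void)
{
	int err;

	/* shift picks the aging period: roughly 2^shift global events */
	err = prop_descriptor_init(&global_events, 10, GFP_KERNEL);
	if (err)
		return err;
	return prop_local_init_percpu(&my_events, GFP_KERNEL);
}

static void example_account_event(void)
{
	/* IRQ-safe wrapper around __prop_inc_percpu() */
	prop_inc_percpu(&global_events, &my_events);
}

static void example_share(long *num, long *den)
{
	/* this competitor's fraction of recent global events: *num / *den */
	prop_fraction_percpu(&global_events, &my_events, num, den);
}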