/*
 * NOTE(review): the lines originally here were cgit web-interface scraping
 * residue (repository navigation links, a file listing, and a run of line
 * numbers) — not part of this header. Replaced with this comment so the
 * file is a valid C translation unit again.
 */
#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H

#ifndef __LINUX_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif

#include <linux/bug.h>

/* Lockdep-aware init helper; "name" is the stringified lock for reporting. */
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);

/*
 * Initialize an RT "spinlock" (backed by an rt_mutex on PREEMPT_RT).
 * The static lock_class_key gives lockdep a distinct class per call site,
 * and #slock passes the variable's name for diagnostics.
 */
#define spin_lock_init(slock)				\
do {							\
	static struct lock_class_key __key;		\
							\
	rt_mutex_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key);	\
} while (0)

/*
 * "__no_mg" variants — NOTE(review): the suffix suggests they skip the
 * migrate_disable()/migrate_enable() bookkeeping of the plain versions;
 * the implementations are not visible here, confirm in kernel/locking/.
 */
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);

/* Core RT spinlock operations; implemented out of line. */
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);

/*
 * lockdep-less calls, for derived types like rwlock:
 * (for trylock they can use rt_mutex_trylock() directly.)
 */
extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);

/* On RT, spin_lock() may sleep: it acquires the underlying rt_mutex. */
#define spin_lock(lock)			rt_spin_lock(lock)

/* BH variant: disable softirq processing, then take the lock. */
#define spin_lock_bh(lock)			\
	do {					\
		local_bh_disable();		\
		rt_spin_lock(lock);		\
	} while (0)

/* RT spinlocks never disable interrupts, so _irq is plain spin_lock. */
#define spin_lock_irq(lock)		spin_lock(lock)

/* __cond_lock() tells sparse the lock is held when the trylock succeeds. */
#define spin_do_trylock(lock)		__cond_lock(lock, rt_spin_trylock(lock))

/* Statement expression yielding 1 on acquisition, 0 otherwise. */
#define spin_trylock(lock)			\
({						\
	int __locked;				\
	__locked = spin_do_trylock(lock);	\
	__locked;				\
})

/*
 * _nested variants: with lockdep, propagate the subclass so nested
 * acquisition of same-class locks is not flagged; without lockdep the
 * subclass is irrelevant and the plain lock operations are used.
 */
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass)		\
	do {						\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

#define spin_lock_bh_nested(lock, subclass)		\
	do {						\
		local_bh_disable();			\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

/* flags is forced to 0: RT spinlocks do not actually disable interrupts. */
# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		rt_spin_lock_nested(lock, subclass);	 \
	} while (0)
#else
# define spin_lock_nested(lock, subclass)	spin_lock(lock)
# define spin_lock_bh_nested(lock, subclass)	spin_lock_bh(lock)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
#endif

/*
 * _irqsave keeps the API shape (typechecked flags argument) but stores 0:
 * interrupts stay enabled on RT, so there is no IRQ state to save.
 */
#define spin_lock_irqsave(lock, flags)			 \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)

/*
 * Acquire @lock and return the (pseudo) IRQ flags. With IRQ-flags tracing
 * enabled the traced lock path supplies the value; otherwise the lock is
 * taken normally and 0 is returned.
 */
static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
{
	unsigned long ret = 0;

#ifdef CONFIG_TRACE_IRQFLAGS
	ret = rt_spin_lock_trace_flags(lock);
#else
	spin_lock(lock); /* lock_local */
#endif
	return ret;
}

/* FIXME: we need rt_spin_lock_nest_lock */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)

/* Release mirrors of the lock macros above. */
#define spin_unlock(lock)			rt_spin_unlock(lock)

/* Drop the lock first, then re-enable softirq processing. */
#define spin_unlock_bh(lock)				\
	do {						\
		rt_spin_unlock(lock);			\
		local_bh_enable();			\
	} while (0)

#define spin_unlock_irq(lock)		spin_unlock(lock)

/* flags was set to 0 by _irqsave; typecheck it and discard. */
#define spin_unlock_irqrestore(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		(void) flags;				\
		spin_unlock(lock);			\
	} while (0)

#define spin_trylock_bh(lock)	__cond_lock(lock, rt_spin_trylock_bh(lock))
#define spin_trylock_irq(lock)	spin_trylock(lock)

/* flags is passed by address so the trylock path can fill it in. */
#define spin_trylock_irqsave(lock, flags)	\
	rt_spin_trylock_irqsave(lock, &(flags))

#define spin_unlock_wait(lock)		rt_spin_unlock_wait(lock)

/* Without GENERIC_LOCKBREAK there is no break_lock field; report 0. */
#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock)	((lock)->break_lock)
#else
# define spin_is_contended(lock)	(((void)(lock), 0))
#endif

/* Return 1 if the lock could be taken right now (underlying mutex free). */
static inline int spin_can_lock(spinlock_t *lock)
{
	if (rt_mutex_is_locked(&lock->lock))
		return 0;
	return 1;
}

/* Nonzero when the underlying rt_mutex is currently held. */
static inline int spin_is_locked(spinlock_t *lock)
{
	int held = rt_mutex_is_locked(&lock->lock);

	return held;
}

/* Debug helper: BUG if the caller does not actually hold @lock. */
static inline void assert_spin_locked(spinlock_t *lock)
{
	int held = spin_is_locked(lock);

	BUG_ON(!held);
}

/* Atomically decrement; if the count hit 0, return with the lock held. */
#define atomic_dec_and_lock(atomic, lock) \
	atomic_dec_and_spin_lock(atomic, lock)

#endif