path: root/kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
author      José Pekkarinen <jose.pekkarinen@nokia.com>   2016-04-11 10:41:07 +0300
committer   José Pekkarinen <jose.pekkarinen@nokia.com>   2016-04-13 08:17:18 +0300
commit      e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree        d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
parent      f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14.

Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). The colliding patch was dropped because its logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h')
-rw-r--r--    kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h    143
1 file changed, 143 insertions(+), 0 deletions(-)
diff --git a/kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 000000000..a0f0ae53a
--- /dev/null
+++ b/kernel/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to corresponding hardware ring based on scheduling
+ * policy.
+*/
+struct amd_sched_entity {
+ struct list_head list;
+ struct amd_sched_rq *rq;
+ struct amd_gpu_scheduler *sched;
+
+ spinlock_t queue_lock;
+ struct kfifo job_queue;
+
+ atomic_t fence_seq;
+ uint64_t fence_context;
+
+ struct fence *dependency;
+ struct fence_cb cb;
+};
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+*/
+struct amd_sched_rq {
+ spinlock_t lock;
+ struct list_head entities;
+ struct amd_sched_entity *current_entity;
+};
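The current_entity pointer hints at a round-robin policy: the run queue remembers which entity emitted last and resumes scanning from there. The following is only an illustrative sketch of such a selection loop under that assumption, not code from this patch; amd_sched_entity_is_ready() is an assumed helper standing in for whatever readiness check the core uses.

/*
 * Illustrative sketch (not part of this patch): round-robin selection
 * from an amd_sched_rq. amd_sched_entity_is_ready() is an assumed helper
 * that would report whether the entity has a runnable job queued.
 */
static struct amd_sched_entity *
rq_select_entity_sketch(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Resume scanning right after the entity that emitted last time. */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around and scan from the head of the list. */
	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);
	return NULL;
}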
+
+struct amd_sched_fence {
+ struct fence base;
+ struct fence_cb cb;
+ struct list_head scheduled_cb;
+ struct amd_gpu_scheduler *sched;
+ spinlock_t lock;
+ void *owner;
+ struct delayed_work dwork;
+ struct list_head list;
+};
+
+struct amd_sched_job {
+ struct amd_gpu_scheduler *sched;
+ struct amd_sched_entity *s_entity;
+ struct amd_sched_fence *s_fence;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+ struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+ if (__f->base.ops == &amd_sched_fence_ops)
+ return __f;
+
+ return NULL;
+}
+
+/**
+ * Define the backend operations called by the scheduler,
+ * these functions should be implemented in driver side
+*/
+struct amd_sched_backend_ops {
+ struct fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct fence *(*run_job)(struct amd_sched_job *sched_job);
+};
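On the driver side, the two callbacks split the work into "return the next fence this job still has to wait for" (dependency) and "submit the job to the hardware ring and return its fence" (run_job). Below is a minimal sketch of such a backend under assumed names; struct my_job, my_fetch_dependency() and my_submit_to_ring() are placeholders for the driver's own job bookkeeping and are not part of this header.

/*
 * Illustrative sketch (not part of this patch): a driver-side backend.
 * Assumes gpu_scheduler.h is included; my_* names are placeholders.
 */
struct my_job {
	struct amd_sched_job base;	/* embedded scheduler job */
	/* driver-private command buffer state would live here */
};

static struct fence *my_sched_dependency(struct amd_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Return a fence the job still waits on, or NULL when it may run. */
	return my_fetch_dependency(job);
}

static struct fence *my_sched_run_job(struct amd_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Push the command buffer to the ring and hand back its fence. */
	return my_submit_to_ring(job);
}

static struct amd_sched_backend_ops my_sched_ops = {
	.dependency = my_sched_dependency,
	.run_job    = my_sched_run_job,
};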
+
+/**
+ * One scheduler is implemented for each hardware ring
+*/
+struct amd_gpu_scheduler {
+ struct amd_sched_backend_ops *ops;
+ uint32_t hw_submission_limit;
+ long timeout;
+ const char *name;
+ struct amd_sched_rq sched_rq;
+ struct amd_sched_rq kernel_rq;
+ wait_queue_head_t wake_up_worker;
+ wait_queue_head_t job_scheduled;
+ atomic_t hw_rq_count;
+ struct list_head fence_list;
+ spinlock_t fence_list_lock;
+ struct task_struct *thread;
+};
+
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_backend_ops *ops,
+ uint32_t hw_submission, long timeout, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq,
+ uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+struct amd_sched_fence *amd_sched_fence_create(
+ struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+#endif
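Taken together, the exported functions above suggest the intended driver flow: one scheduler instance per hardware ring, one entity per submission context, and one scheduler fence per pushed job. The sketch below wires that up using the same assumed my_* names as in the backend example; the queue depths, the MAX_SCHEDULE_TIMEOUT value and the explicit filling of the sched_job fields are illustrative assumptions, since the header itself does not say which side initializes them.

/*
 * Illustrative sketch (not part of this patch): bringing up one scheduler
 * per ring and pushing a job through an entity.
 */
static int my_ring_sched_init(struct amd_gpu_scheduler *sched,
			      struct amd_sched_entity *entity)
{
	int r;

	/* One scheduler instance per hardware ring. */
	r = amd_sched_init(sched, &my_sched_ops,
			   16 /* hw_submission */, MAX_SCHEDULE_TIMEOUT,
			   "my_ring");
	if (r)
		return r;

	/* One entity per submission context, fed into the normal run queue. */
	r = amd_sched_entity_init(sched, entity, &sched->sched_rq,
				  32 /* job queue depth */);
	if (r)
		amd_sched_fini(sched);

	return r;
}

static int my_push_job(struct my_job *job, struct amd_sched_entity *entity,
		       void *owner)
{
	/* Each job carries a scheduler fence that signals on completion. */
	job->base.s_fence = amd_sched_fence_create(entity, owner);
	if (!job->base.s_fence)
		return -ENOMEM;

	job->base.sched = entity->sched;	/* assumed to be caller-filled */
	job->base.s_entity = entity;
	amd_sched_entity_push_job(&job->base);
	return 0;
}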