author    José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit    e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/gpu/drm/i915/intel_ringbuffer.h
parent    f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download
page. During the rebasing, the following patch collided: "Force tick
interrupt and get rid of softirq magic" (I70131fb85). The colliding
patch has been dropped, because its logic was already present in the
source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r--  kernel/drivers/gpu/drm/i915/intel_ringbuffer.h  122
1 file changed, 87 insertions(+), 35 deletions(-)
diff --git a/kernel/drivers/gpu/drm/i915/intel_ringbuffer.h b/kernel/drivers/gpu/drm/i915/intel_ringbuffer.h
index 94514d364..49fa41dc0 100644
--- a/kernel/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/kernel/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,6 +2,7 @@
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
+#include "i915_gem_batch_pool.h"
#define I915_CMD_HASH_ORDER 9
@@ -11,6 +12,7 @@
* workarounds!
*/
#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -104,6 +106,9 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
+ int reserved_size;
+ int reserved_tail;
+ bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -117,6 +122,26 @@ struct intel_ringbuffer {
};
struct intel_context;
+struct drm_i915_reg_descriptor;
+
+/*
+ * we use a single page to load ctx workarounds so all of these
+ * values are referred in terms of dwords
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies batch starting position, also helpful in case
+ * if we want to have multiple batches at different offsets based on
+ * some criteria. It is not a requirement at the moment but provides
+ * an option for future use.
+ * size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct drm_i915_gem_object *obj;
+};
struct intel_engine_cs {
const char *name;
@@ -133,7 +158,15 @@ struct intel_engine_cs {
struct drm_device *dev;
struct intel_ringbuffer *buffer;
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -143,15 +176,14 @@ struct intel_engine_cs {
int (*init_hw)(struct intel_engine_cs *ring);
- int (*init_context)(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+ int (*init_context)(struct drm_i915_gem_request *req);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
- int __must_check (*flush)(struct intel_engine_cs *ring,
+ int __must_check (*flush)(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_engine_cs *ring);
+ int (*add_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -162,11 +194,12 @@ struct intel_engine_cs {
bool lazy_coherency);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
- int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
+ int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS 0x4
void (*cleanup)(struct intel_engine_cs *ring);
/* GEN8 signal/wait table - never trust comments!
@@ -220,10 +253,10 @@ struct intel_engine_cs {
};
/* AKA wait() */
- int (*sync_to)(struct intel_engine_cs *ring,
- struct intel_engine_cs *to,
+ int (*sync_to)(struct drm_i915_gem_request *to_req,
+ struct intel_engine_cs *from,
u32 seqno);
- int (*signal)(struct intel_engine_cs *signaller,
+ int (*signal)(struct drm_i915_gem_request *signaller_req,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
@@ -234,14 +267,11 @@ struct intel_engine_cs {
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
- int (*emit_request)(struct intel_ringbuffer *ringbuf,
- struct drm_i915_gem_request *request);
- int (*emit_flush)(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+ int (*emit_request)(struct drm_i915_gem_request *request);
+ int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains);
- int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+ int (*emit_bb_start)(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags);
/**
@@ -263,10 +293,6 @@ struct intel_engine_cs {
struct list_head request_list;
/**
- * Do we have some not yet emitted requests outstanding?
- */
- struct drm_i915_gem_request *outstanding_lazy_request;
- /**
* Seqno of request most recently submitted to request_list.
* Used exclusively by hang checker to avoid grabbing lock while
* inspecting request list.
@@ -299,14 +325,14 @@ struct intel_engine_cs {
/*
* Table of registers allowed in commands that read/write registers.
*/
- const u32 *reg_table;
+ const struct drm_i915_reg_descriptor *reg_table;
int reg_count;
/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
- const u32 *master_reg_table;
+ const struct drm_i915_reg_descriptor *master_reg_table;
int master_reg_count;
/*
@@ -351,6 +377,13 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
return idx;
}
+static inline void
+intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+{
+ drm_clflush_virt_range(&ring->status_page.page_addr[reg],
+ sizeof(uint32_t));
+}
+
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
int reg)
@@ -387,18 +420,20 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
- struct intel_ringbuffer *ringbuf);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+void intel_ringbuffer_free(struct intel_ringbuffer *ring);
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
@@ -415,12 +450,11 @@ int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -440,11 +474,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
- BUG_ON(ring->outstanding_lazy_request == NULL);
- return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST 160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
#endif /* _INTEL_RINGBUFFER_H_ */
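
The i915_ctx_workarounds structure added above keeps both the
indirect_ctx and per_ctx batches in a single page, with their offset
and size fields expressed in dwords. A minimal sketch of turning those
fields into a GPU address, assuming a hypothetical wa_bb_addr() helper
and a ggtt_base obtained from pinning wa_ctx.obj (illustration only,
not the driver's actual lookup):

static u64 wa_bb_addr(u64 ggtt_base, const struct i915_wa_ctx_bb *bb)
{
	/* offset is stored in dwords; scale it to bytes before adding
	 * it to the base address of the pinned workaround page. */
	return ggtt_base + bb->offset * sizeof(uint32_t);
}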
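
The new intel_flush_status_page() helper pairs with the comment about
chipsets that are "not quite as coherent as advertised". A hedged
sketch of a get_seqno hook for such a chipset (example_get_seqno is
hypothetical; the driver's real rings may use a different kick, such
as a posting read of a hardware register):

static u32 example_get_seqno(struct intel_engine_cs *ring,
			     bool lazy_coherency)
{
	/* If an up-to-date value is required, flush the status page
	 * cacheline so the CPU re-reads what the GPU last wrote.
	 * I915_GEM_HWS_INDEX is defined elsewhere in this header. */
	if (!lazy_coherency)
		intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}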
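
The reservation helpers declared at the bottom of the header form a
four-step lifecycle around MIN_SPACE_FOR_ADD_REQUEST: reserve at
request creation, cancel on discard, use/end inside i915_add_request().
A simplified sketch of the intended call order; example_submit(),
emit_user_payload() and emit_request_commands() are hypothetical
placeholders, and only the intel_ring_reserved_space_*() calls and the
MIN_SPACE_FOR_ADD_REQUEST constant come from this header:

static int example_submit(struct drm_i915_gem_request *req,
			  struct intel_ringbuffer *ringbuf)
{
	int ret;

	/* At request creation: set aside enough words that the final
	 * i915_add_request() cannot fail for lack of ring space. */
	intel_ring_reserved_space_reserve(ringbuf,
					  MIN_SPACE_FOR_ADD_REQUEST);

	ret = emit_user_payload(req);		/* hypothetical */
	if (ret) {
		/* Request discarded: hand the reservation back. */
		intel_ring_reserved_space_cancel(ringbuf);
		return ret;
	}

	/* Inside i915_add_request(): dip into the reserved words... */
	intel_ring_reserved_space_use(ringbuf);
	emit_request_commands(req);		/* hypothetical */
	/* ...then mark the reservation as consumed. */
	intel_ring_reserved_space_end(ringbuf);

	return 0;
}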