Diffstat (limited to 'kernel/drivers/gpu/drm/virtio')
-rw-r--r--  kernel/drivers/gpu/drm/virtio/Kconfig             14
-rw-r--r--  kernel/drivers/gpu/drm/virtio/Makefile            12
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_debugfs.c   64
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_display.c  524
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_drm_bus.c   95
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_drv.c      162
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_drv.h      424
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_fb.c       417
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_fence.c    119
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_gem.c      181
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_ioctl.c    573
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_kms.c      306
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_object.c   165
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_plane.c    120
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_prime.c     71
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_ttm.c      468
-rw-r--r--  kernel/drivers/gpu/drm/virtio/virtgpu_vq.c       925
17 files changed, 4640 insertions, 0 deletions
diff --git a/kernel/drivers/gpu/drm/virtio/Kconfig b/kernel/drivers/gpu/drm/virtio/Kconfig
new file mode 100644
index 000000000..9983eadb8
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/Kconfig
@@ -0,0 +1,14 @@
+config DRM_VIRTIO_GPU
+ tristate "Virtio GPU driver"
+ depends on DRM && VIRTIO
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_TTM
+ help
+ This is the virtual GPU driver for virtio. It can be used with
+ QEMU based VMMs (like KVM or Xen).
+
+ If unsure say M.
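
For reference, a minimal kernel .config fragment satisfying the dependencies named above might look like the following sketch; CONFIG_VIRTIO_PCI is an assumption about the usual transport for QEMU guests, not something this Kconfig entry itself requires:

    CONFIG_DRM=y
    CONFIG_VIRTIO=y
    CONFIG_VIRTIO_PCI=y        # assumed transport; adjust for your platform
    CONFIG_DRM_VIRTIO_GPU=m
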
diff --git a/kernel/drivers/gpu/drm/virtio/Makefile b/kernel/drivers/gpu/drm/virtio/Makefile
new file mode 100644
index 000000000..3fb8eac10
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
+ virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+ virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
+ virtgpu_ioctl.o virtgpu_prime.o
+
+obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/kernel/drivers/gpu/drm/virtio/virtgpu_debugfs.c
new file mode 100644
index 000000000..512263919
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "virtgpu_drv.h"
+
+static int
+virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+
+ seq_printf(m, "fence %llu %lld\n",
+ (u64)atomic64_read(&vgdev->fence_drv.last_seq),
+ vgdev->fence_drv.sync_seq);
+ return 0;
+}
+
+static struct drm_info_list virtio_gpu_debugfs_list[] = {
+ { "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+};
+
+#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
+
+int
+virtio_gpu_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(virtio_gpu_debugfs_list,
+ VIRTIO_GPU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ return 0;
+}
+
+void
+virtio_gpu_debugfs_takedown(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(virtio_gpu_debugfs_list,
+ VIRTIO_GPU_DEBUGFS_ENTRIES,
+ minor);
+}
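
Once the driver is bound, the "irq_fence" entry registered above appears under the DRM debugfs directory; an illustrative read follows (the path assumes debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0, and the values are example output in the "fence <last_seq> <sync_seq>" format printed above):

    # cat /sys/kernel/debug/dri/0/irq_fence
    fence 42 42
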
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_display.c b/kernel/drivers/gpu/drm/virtio/virtgpu_display.c
new file mode 100644
index 000000000..578fe0a93
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -0,0 +1,524 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie
+ * Alon Levy
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "virtgpu_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+#define XRES_MIN 320
+#define YRES_MIN 200
+
+#define XRES_DEF 1024
+#define YRES_DEF 768
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+static void virtio_gpu_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t start, uint32_t size)
+{
+ /* TODO */
+}
+
+static void
+virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_output *output)
+{
+ output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+ output->cursor.resource_id = 0;
+ virtio_gpu_cursor_ping(vgdev, output);
+}
+
+static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x, int32_t hot_y)
+{
+ struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
+ struct virtio_gpu_output *output =
+ container_of(crtc, struct virtio_gpu_output, crtc);
+ struct drm_gem_object *gobj = NULL;
+ struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_fence *fence = NULL;
+ int ret = 0;
+
+ if (handle == 0) {
+ virtio_gpu_hide_cursor(vgdev, output);
+ return 0;
+ }
+
+ /* lookup the cursor */
+ gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (gobj == NULL)
+ return -ENOENT;
+
+ qobj = gem_to_virtio_gpu_obj(gobj);
+
+ if (!qobj->hw_res_handle) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
+ cpu_to_le32(64),
+ cpu_to_le32(64),
+ 0, 0, &fence);
+ ret = virtio_gpu_object_reserve(qobj, false);
+ if (!ret) {
+ reservation_object_add_excl_fence(qobj->tbo.resv,
+ &fence->f);
+ fence_put(&fence->f);
+ virtio_gpu_object_unreserve(qobj);
+ virtio_gpu_object_wait(qobj, false);
+ }
+
+ output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+ output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
+ output->cursor.hot_x = cpu_to_le32(hot_x);
+ output->cursor.hot_y = cpu_to_le32(hot_y);
+ virtio_gpu_cursor_ping(vgdev, output);
+ ret = 0;
+
+out:
+ drm_gem_object_unreference_unlocked(gobj);
+ return ret;
+}
+
+static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
+ struct virtio_gpu_output *output =
+ container_of(crtc, struct virtio_gpu_output, crtc);
+
+ output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
+ output->cursor.pos.x = cpu_to_le32(x);
+ output->cursor.pos.y = cpu_to_le32(y);
+ virtio_gpu_cursor_ping(vgdev, output);
+ return 0;
+}
+
+static int virtio_gpu_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
+ struct virtio_gpu_output *output =
+ container_of(crtc, struct virtio_gpu_output, crtc);
+ struct drm_plane *plane = crtc->primary;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+ unsigned long irqflags;
+ uint32_t handle;
+
+ plane->fb = fb;
+ vgfb = to_virtio_gpu_framebuffer(plane->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ handle = bo->hw_res_handle;
+
+ DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
+ bo->dumb ? ", dumb" : "",
+ crtc->mode.hdisplay, crtc->mode.vdisplay);
+ if (bo->dumb) {
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, handle, 0,
+ cpu_to_le32(crtc->mode.hdisplay),
+ cpu_to_le32(crtc->mode.vdisplay),
+ 0, 0, NULL);
+ }
+ virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
+ crtc->mode.hdisplay,
+ crtc->mode.vdisplay, 0, 0);
+ virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
+ crtc->mode.hdisplay,
+ crtc->mode.vdisplay);
+
+ if (event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
+ drm_send_vblank_event(crtc->dev, -1, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
+ }
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
+ .cursor_set2 = virtio_gpu_crtc_cursor_set,
+ .cursor_move = virtio_gpu_crtc_cursor_move,
+ .gamma_set = virtio_gpu_crtc_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+
+ .page_flip = virtio_gpu_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct virtio_gpu_framebuffer *virtio_gpu_fb
+ = to_virtio_gpu_framebuffer(fb);
+
+ if (virtio_gpu_fb->obj)
+ drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(virtio_gpu_fb);
+}
+
+static int
+virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct virtio_gpu_framebuffer *virtio_gpu_fb
+ = to_virtio_gpu_framebuffer(fb);
+
+ return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
+}
+
+static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
+ .destroy = virtio_gpu_user_framebuffer_destroy,
+ .dirty = virtio_gpu_framebuffer_surface_dirty,
+};
+
+int
+virtio_gpu_framebuffer_init(struct drm_device *dev,
+ struct virtio_gpu_framebuffer *vgfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+ struct virtio_gpu_object *bo;
+ vgfb->obj = obj;
+
+ bo = gem_to_virtio_gpu_obj(obj);
+
+ ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
+ if (ret) {
+ vgfb->obj = NULL;
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&vgfb->base, mode_cmd);
+
+ spin_lock_init(&vgfb->dirty_lock);
+ vgfb->x1 = vgfb->y1 = INT_MAX;
+ vgfb->x2 = vgfb->y2 = 0;
+ return 0;
+}
+
+static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
+ crtc->mode.hdisplay,
+ crtc->mode.vdisplay, 0, 0);
+}
+
+static void virtio_gpu_crtc_enable(struct drm_crtc *crtc)
+{
+}
+
+static void virtio_gpu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+}
+
+static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
+ .enable = virtio_gpu_crtc_enable,
+ .disable = virtio_gpu_crtc_disable,
+ .mode_fixup = virtio_gpu_crtc_mode_fixup,
+ .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
+ .atomic_check = virtio_gpu_crtc_atomic_check,
+};
+
+static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void virtio_gpu_enc_enable(struct drm_encoder *encoder)
+{
+}
+
+static void virtio_gpu_enc_disable(struct drm_encoder *encoder)
+{
+}
+
+static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
+{
+ struct virtio_gpu_output *output =
+ drm_connector_to_virtio_gpu_output(connector);
+ struct drm_display_mode *mode = NULL;
+ int count, width, height;
+
+ width = le32_to_cpu(output->info.r.width);
+ height = le32_to_cpu(output->info.r.height);
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+
+ if (width == 0 || height == 0) {
+ width = XRES_DEF;
+ height = YRES_DEF;
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+ } else {
+ DRM_DEBUG("add mode: %dx%d\n", width, height);
+ mode = drm_cvt_mode(connector->dev, width, height, 60,
+ false, false, false);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ count++;
+ }
+
+ return count;
+}
+
+static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct virtio_gpu_output *output =
+ drm_connector_to_virtio_gpu_output(connector);
+ int width, height;
+
+ width = le32_to_cpu(output->info.r.width);
+ height = le32_to_cpu(output->info.r.height);
+
+ if (!(mode->type & DRM_MODE_TYPE_PREFERRED))
+ return MODE_OK;
+ if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF)
+ return MODE_OK;
+ if (mode->hdisplay <= width && mode->hdisplay >= width - 16 &&
+ mode->vdisplay <= height && mode->vdisplay >= height - 16)
+ return MODE_OK;
+
+ DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay);
+ return MODE_BAD;
+}
+
+static struct drm_encoder*
+virtio_gpu_best_encoder(struct drm_connector *connector)
+{
+ struct virtio_gpu_output *virtio_gpu_output =
+ drm_connector_to_virtio_gpu_output(connector);
+
+ return &virtio_gpu_output->enc;
+}
+
+static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
+ .mode_fixup = virtio_gpu_enc_mode_fixup,
+ .mode_set = virtio_gpu_enc_mode_set,
+ .enable = virtio_gpu_enc_enable,
+ .disable = virtio_gpu_enc_disable,
+};
+
+static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
+ .get_modes = virtio_gpu_conn_get_modes,
+ .mode_valid = virtio_gpu_conn_mode_valid,
+ .best_encoder = virtio_gpu_best_encoder,
+};
+
+static void virtio_gpu_conn_save(struct drm_connector *connector)
+{
+ DRM_DEBUG("\n");
+}
+
+static void virtio_gpu_conn_restore(struct drm_connector *connector)
+{
+ DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status virtio_gpu_conn_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct virtio_gpu_output *output =
+ drm_connector_to_virtio_gpu_output(connector);
+
+ if (output->info.enabled)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void virtio_gpu_conn_destroy(struct drm_connector *connector)
+{
+ struct virtio_gpu_output *virtio_gpu_output =
+ drm_connector_to_virtio_gpu_output(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(virtio_gpu_output);
+}
+
+static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .save = virtio_gpu_conn_save,
+ .restore = virtio_gpu_conn_restore,
+ .detect = virtio_gpu_conn_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
+ .destroy = virtio_gpu_conn_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
+{
+ struct drm_device *dev = vgdev->ddev;
+ struct virtio_gpu_output *output = vgdev->outputs + index;
+ struct drm_connector *connector = &output->conn;
+ struct drm_encoder *encoder = &output->enc;
+ struct drm_crtc *crtc = &output->crtc;
+ struct drm_plane *plane;
+
+ output->index = index;
+ if (index == 0) {
+ output->info.enabled = cpu_to_le32(true);
+ output->info.r.width = cpu_to_le32(XRES_DEF);
+ output->info.r.height = cpu_to_le32(YRES_DEF);
+ }
+
+ plane = virtio_gpu_plane_init(vgdev, index);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &virtio_gpu_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
+
+ drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
+ encoder->possible_crtcs = 1 << index;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_register(connector);
+ return 0;
+}
+
+static struct drm_framebuffer *
+virtio_gpu_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj = NULL;
+ struct virtio_gpu_framebuffer *virtio_gpu_fb;
+ int ret;
+
+ /* lookup object associated with res handle */
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-EINVAL);
+
+ virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
+ if (virtio_gpu_fb == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(virtio_gpu_fb);
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+ return NULL;
+ }
+
+ return &virtio_gpu_fb->base;
+}
+
+static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
+ .fb_create = virtio_gpu_user_framebuffer_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
+{
+ int i;
+
+ drm_mode_config_init(vgdev->ddev);
+ vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
+
+ /* modes will be validated against the framebuffer size */
+ vgdev->ddev->mode_config.min_width = XRES_MIN;
+ vgdev->ddev->mode_config.min_height = YRES_MIN;
+ vgdev->ddev->mode_config.max_width = XRES_MAX;
+ vgdev->ddev->mode_config.max_height = YRES_MAX;
+
+ for (i = 0 ; i < vgdev->num_scanouts; ++i)
+ vgdev_output_init(vgdev, i);
+
+ drm_mode_config_reset(vgdev->ddev);
+ return 0;
+}
+
+void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
+{
+ virtio_gpu_fbdev_fini(vgdev);
+ drm_mode_config_cleanup(vgdev->ddev);
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/kernel/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
new file mode 100644
index 000000000..88a39165e
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "virtgpu_drv.h"
+
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (pdev) {
+ return drm_pci_set_busid(dev, master);
+ }
+ return 0;
+}
+
+static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
+{
+ struct apertures_struct *ap;
+ bool primary;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = pci_resource_start(pci_dev, 0);
+ ap->ranges[0].size = pci_resource_len(pci_dev, 0);
+
+ primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
+ & IORESOURCE_ROM_SHADOW;
+
+ remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
+
+ kfree(ap);
+}
+
+int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = drm_dev_alloc(driver, &vdev->dev);
+ if (!dev)
+ return -ENOMEM;
+ dev->virtdev = vdev;
+ vdev->priv = dev;
+
+ if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
+ struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+
+ DRM_INFO("pci: %s detected\n",
+ vga ? "virtio-vga" : "virtio-gpu-pci");
+ dev->pdev = pdev;
+ if (vga)
+ virtio_pci_kick_out_firmware_fb(pdev);
+ }
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_free;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+ driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(dev);
+ return ret;
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_drv.c b/kernel/drivers/gpu/drm/virtio/virtgpu_drv.c
new file mode 100644
index 000000000..b40ed6061
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/pci.h>
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "virtgpu_drv.h"
+static struct drm_driver driver;
+
+static int virtio_gpu_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, virtio_gpu_modeset, int, 0400);
+
+static int virtio_gpu_probe(struct virtio_device *vdev)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && virtio_gpu_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (virtio_gpu_modeset == 0)
+ return -EINVAL;
+
+ return drm_virtio_init(&driver, vdev);
+}
+
+static void virtio_gpu_remove(struct virtio_device *vdev)
+{
+ struct drm_device *dev = vdev->priv;
+ drm_put_dev(dev);
+}
+
+static void virtio_gpu_config_changed(struct virtio_device *vdev)
+{
+ struct drm_device *dev = vdev->priv;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+
+ schedule_work(&vgdev->config_changed_work);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+#ifdef __LITTLE_ENDIAN
+ /*
+ * Gallium command stream sent by virgl is native endian.
+ * Because of that we only support little endian guests on
+ * little endian hosts.
+ */
+ VIRTIO_GPU_F_VIRGL,
+#endif
+};
+static struct virtio_driver virtio_gpu_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtio_gpu_probe,
+ .remove = virtio_gpu_remove,
+ .config_changed = virtio_gpu_config_changed
+};
+
+module_virtio_driver(virtio_gpu_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio GPU driver");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_AUTHOR("Alon Levy");
+
+static const struct file_operations virtio_gpu_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = virtio_gpu_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+ .set_busid = drm_virtio_set_busid,
+ .load = virtio_gpu_driver_load,
+ .unload = virtio_gpu_driver_unload,
+ .open = virtio_gpu_driver_open,
+ .postclose = virtio_gpu_driver_postclose,
+
+ .dumb_create = virtio_gpu_mode_dumb_create,
+ .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+ .dumb_destroy = virtio_gpu_mode_dumb_destroy,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = virtio_gpu_debugfs_init,
+ .debugfs_cleanup = virtio_gpu_debugfs_takedown,
+#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = virtgpu_gem_prime_pin,
+ .gem_prime_unpin = virtgpu_gem_prime_unpin,
+ .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
+ .gem_prime_vmap = virtgpu_gem_prime_vmap,
+ .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
+ .gem_prime_mmap = virtgpu_gem_prime_mmap,
+
+ .gem_free_object = virtio_gpu_gem_free_object,
+ .gem_open_object = virtio_gpu_gem_object_open,
+ .gem_close_object = virtio_gpu_gem_object_close,
+ .fops = &virtio_gpu_driver_fops,
+
+ .ioctls = virtio_gpu_ioctls,
+ .num_ioctls = DRM_VIRTIO_NUM_IOCTLS,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
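
The modeset parameter declared in this file is consulted at probe time; an illustrative load-time invocation is shown below (the module name virtio_gpu follows the Makefile's virtio-gpu.o target, and modeset=0 makes the probe bail out, effectively disabling the driver):

    # load normally
    modprobe virtio_gpu
    # or refuse to bind, e.g. to keep the firmware framebuffer
    modprobe virtio_gpu modeset=0
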
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_drv.h b/kernel/drivers/gpu/drm/virtio/virtgpu_drv.h
new file mode 100644
index 000000000..79f0abe69
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -0,0 +1,424 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VIRTIO_DRV_H
+#define VIRTIO_DRV_H
+
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_gpu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_crtc_helper.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#define DRIVER_NAME "virtio_gpu"
+#define DRIVER_DESC "virtio GPU"
+#define DRIVER_DATE "0"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 1
+
+/* virtgpu_drm_bus.c */
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
+int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
+
+struct virtio_gpu_object {
+ struct drm_gem_object gem_base;
+ uint32_t hw_res_handle;
+
+ struct sg_table *pages;
+ void *vmap;
+ bool dumb;
+ struct ttm_place placement_code;
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+};
+#define gem_to_virtio_gpu_obj(gobj) \
+ container_of((gobj), struct virtio_gpu_object, gem_base)
+
+struct virtio_gpu_vbuffer;
+struct virtio_gpu_device;
+
+typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf);
+
+struct virtio_gpu_fence_driver {
+ atomic64_t last_seq;
+ uint64_t sync_seq;
+ struct list_head fences;
+ spinlock_t lock;
+};
+
+struct virtio_gpu_fence {
+ struct fence f;
+ struct virtio_gpu_fence_driver *drv;
+ struct list_head node;
+ uint64_t seq;
+};
+#define to_virtio_fence(x) \
+ container_of(x, struct virtio_gpu_fence, f)
+
+struct virtio_gpu_vbuffer {
+ char *buf;
+ int size;
+
+ void *data_buf;
+ uint32_t data_size;
+
+ char *resp_buf;
+ int resp_size;
+
+ virtio_gpu_resp_cb resp_cb;
+
+ struct list_head list;
+};
+
+struct virtio_gpu_output {
+ int index;
+ struct drm_crtc crtc;
+ struct drm_connector conn;
+ struct drm_encoder enc;
+ struct virtio_gpu_display_one info;
+ struct virtio_gpu_update_cursor cursor;
+ int cur_x;
+ int cur_y;
+};
+#define drm_crtc_to_virtio_gpu_output(x) \
+ container_of(x, struct virtio_gpu_output, crtc)
+#define drm_connector_to_virtio_gpu_output(x) \
+ container_of(x, struct virtio_gpu_output, conn)
+#define drm_encoder_to_virtio_gpu_output(x) \
+ container_of(x, struct virtio_gpu_output, enc)
+
+struct virtio_gpu_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+ uint32_t hw_res_handle;
+};
+#define to_virtio_gpu_framebuffer(x) \
+ container_of(x, struct virtio_gpu_framebuffer, base)
+
+struct virtio_gpu_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct drm_global_reference mem_global_ref;
+ bool mem_global_referenced;
+ struct ttm_bo_device bdev;
+};
+
+struct virtio_gpu_fbdev;
+
+struct virtio_gpu_queue {
+ struct virtqueue *vq;
+ spinlock_t qlock;
+ wait_queue_head_t ack_queue;
+ struct work_struct dequeue_work;
+};
+
+struct virtio_gpu_drv_capset {
+ uint32_t id;
+ uint32_t max_version;
+ uint32_t max_size;
+};
+
+struct virtio_gpu_drv_cap_cache {
+ struct list_head head;
+ void *caps_cache;
+ uint32_t id;
+ uint32_t version;
+ uint32_t size;
+ atomic_t is_valid;
+};
+
+struct virtio_gpu_device {
+ struct device *dev;
+ struct drm_device *ddev;
+
+ struct virtio_device *vdev;
+
+ struct virtio_gpu_mman mman;
+
+ /* pointer to fbdev info structure */
+ struct virtio_gpu_fbdev *vgfbdev;
+ struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
+ uint32_t num_scanouts;
+
+ struct virtio_gpu_queue ctrlq;
+ struct virtio_gpu_queue cursorq;
+ struct list_head free_vbufs;
+ spinlock_t free_vbufs_lock;
+ void *vbufs;
+ bool vqs_ready;
+
+ struct idr resource_idr;
+ spinlock_t resource_idr_lock;
+
+ wait_queue_head_t resp_wq;
+ /* current display info */
+ spinlock_t display_info_lock;
+ bool display_info_pending;
+
+ struct virtio_gpu_fence_driver fence_drv;
+
+ struct idr ctx_id_idr;
+ spinlock_t ctx_id_idr_lock;
+
+ bool has_virgl_3d;
+
+ struct work_struct config_changed_work;
+
+ struct virtio_gpu_drv_capset *capsets;
+ uint32_t num_capsets;
+ struct list_head cap_cache;
+};
+
+struct virtio_gpu_fpriv {
+ uint32_t ctx_id;
+};
+
+/* virtio_ioctl.c */
+#define DRM_VIRTIO_NUM_IOCTLS 10
+extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+
+/* virtio_kms.c */
+int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
+int virtio_gpu_driver_unload(struct drm_device *dev);
+int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
+void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
+
+/* virtio_gem.c */
+void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
+int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
+int virtio_gpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p);
+int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file);
+void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file);
+struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
+ size_t size, bool kernel,
+ bool pinned);
+int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+
+/* virtio_fb */
+#define VIRTIO_GPUFB_CONN_LIMIT 1
+int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
+int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
+ struct drm_clip_rect *clips,
+ unsigned num_clips);
+/* virtio vg */
+int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
+void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
+void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid);
+void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
+void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t format,
+ uint32_t width,
+ uint32_t height);
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id);
+void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id, uint64_t offset,
+ __le32 width, __le32 height,
+ __le32 x, __le32 y,
+ struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height);
+void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id, uint32_t resource_id,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y);
+int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ uint32_t resource_id,
+ struct virtio_gpu_fence **fence);
+int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
+int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
+void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_output *output);
+int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
+void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id);
+int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
+int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+ int idx, int version,
+ struct virtio_gpu_drv_cap_cache **cache_p);
+void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
+ uint32_t nlen, const char *name);
+void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
+ uint32_t id);
+void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
+ uint32_t ctx_id,
+ uint32_t resource_id);
+void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
+ uint32_t ctx_id,
+ uint32_t resource_id);
+void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+ void *data, uint32_t data_size,
+ uint32_t ctx_id, struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id, uint32_t ctx_id,
+ uint64_t offset, uint32_t level,
+ struct virtio_gpu_box *box,
+ struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id, uint32_t ctx_id,
+ uint64_t offset, uint32_t level,
+ struct virtio_gpu_box *box,
+ struct virtio_gpu_fence **fence);
+void
+virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_resource_create_3d *rc_3d,
+ struct virtio_gpu_fence **fence);
+void virtio_gpu_ctrl_ack(struct virtqueue *vq);
+void virtio_gpu_cursor_ack(struct virtqueue *vq);
+void virtio_gpu_fence_ack(struct virtqueue *vq);
+void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
+void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_dequeue_fence_func(struct work_struct *work);
+
+/* virtio_gpu_display.c */
+int virtio_gpu_framebuffer_init(struct drm_device *dev,
+ struct virtio_gpu_framebuffer *vgfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
+
+/* virtio_gpu_plane.c */
+struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ int index);
+
+/* virtio_gpu_ttm.c */
+int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
+int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* virtio_gpu_fence.c */
+int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_ctrl_hdr *cmd_hdr,
+ struct virtio_gpu_fence **fence);
+void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
+ u64 last_seq);
+
+/* virtio_gpu_object */
+int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ unsigned long size, bool kernel, bool pinned,
+ struct virtio_gpu_object **bo_ptr);
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
+int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+ struct virtio_gpu_object *bo);
+void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
+int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
+
+/* virtgpu_prime.c */
+int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
+void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
+void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+static inline struct virtio_gpu_object*
+virtio_gpu_object_ref(struct virtio_gpu_object *bo)
+{
+ ttm_bo_reference(&bo->tbo);
+ return bo;
+}
+
+static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+ tbo = &((*bo)->tbo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+}
+
+static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
+{
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+}
+
+static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
+ bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS) {
+ struct virtio_gpu_device *qdev =
+ bo->gem_base.dev->dev_private;
+ dev_err(qdev->dev, "%p reserve failed\n", bo);
+ }
+ return r;
+ }
+ return 0;
+}
+
+static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+/* virgl debugfs */
+int virtio_gpu_debugfs_init(struct drm_minor *minor);
+void virtio_gpu_debugfs_takedown(struct drm_minor *minor);
+
+#endif
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_fb.c b/kernel/drivers/gpu/drm/virtio/virtgpu_fb.c
new file mode 100644
index 000000000..6a81e0845
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "virtgpu_drv.h"
+
+#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
+
+struct virtio_gpu_fbdev {
+ struct drm_fb_helper helper;
+ struct virtio_gpu_framebuffer vgfb;
+ struct list_head fbdev_list;
+ struct virtio_gpu_device *vgdev;
+ struct delayed_work work;
+};
+
+static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
+ bool store, int x, int y,
+ int width, int height)
+{
+ struct drm_device *dev = fb->base.dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ bool store_for_later = false;
+ int bpp = fb->base.bits_per_pixel / 8;
+ int x2, y2;
+ unsigned long flags;
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+
+ if ((width <= 0) ||
+ (x + width > fb->base.width) ||
+ (y + height > fb->base.height)) {
+ DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
+ width, height, x, y,
+ fb->base.width, fb->base.height);
+ return -EINVAL;
+ }
+
+ /*
+ * Can be called with pretty much any context (console output
+ * path). If we are in atomic just store the dirty rect info
+ * to send out the update later.
+ *
+ * Can't test inside spin lock.
+ */
+ if (in_atomic() || store)
+ store_for_later = true;
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+
+ spin_lock_irqsave(&fb->dirty_lock, flags);
+
+ if (fb->y1 < y)
+ y = fb->y1;
+ if (fb->y2 > y2)
+ y2 = fb->y2;
+ if (fb->x1 < x)
+ x = fb->x1;
+ if (fb->x2 > x2)
+ x2 = fb->x2;
+
+ if (store_for_later) {
+ fb->x1 = x;
+ fb->x2 = x2;
+ fb->y1 = y;
+ fb->y2 = y2;
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ return 0;
+ }
+
+ fb->x1 = fb->y1 = INT_MAX;
+ fb->x2 = fb->y2 = 0;
+
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+
+ {
+ uint32_t offset;
+ uint32_t w = x2 - x + 1;
+ uint32_t h = y2 - y + 1;
+
+ offset = (y * fb->base.pitches[0]) + x * bpp;
+
+ virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
+ offset,
+ cpu_to_le32(w),
+ cpu_to_le32(h),
+ cpu_to_le32(x),
+ cpu_to_le32(y),
+ NULL);
+
+ }
+ virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
+ x, y, x2 - x + 1, y2 - y + 1);
+ return 0;
+}
+
+int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+ struct drm_clip_rect norect;
+ struct drm_clip_rect *clips_ptr;
+ int left, right, top, bottom;
+ int i;
+ int inc = 1;
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = vgfb->base.width;
+ norect.y2 = vgfb->base.height;
+ }
+ left = clips->x1;
+ right = clips->x2;
+ top = clips->y1;
+ bottom = clips->y2;
+
+ /* skip the first clip rect */
+ for (i = 1, clips_ptr = clips + inc;
+ i < num_clips; i++, clips_ptr += inc) {
+ left = min_t(int, left, (int)clips_ptr->x1);
+ right = max_t(int, right, (int)clips_ptr->x2);
+ top = min_t(int, top, (int)clips_ptr->y1);
+ bottom = max_t(int, bottom, (int)clips_ptr->y2);
+ }
+
+ if (obj->dumb)
+ return virtio_gpu_dirty_update(vgfb, false, left, top,
+ right - left, bottom - top);
+
+ virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
+ left, top, right - left, bottom - top);
+ return 0;
+}
+
+static void virtio_gpu_fb_dirty_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct virtio_gpu_fbdev *vfbdev =
+ container_of(delayed_work, struct virtio_gpu_fbdev, work);
+ struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;
+
+ virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
+ vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
+}
+
+static void virtio_gpu_3d_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct virtio_gpu_fbdev *vfbdev = info->par;
+ drm_fb_helper_sys_fillrect(info, rect);
+ virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
+ rect->width, rect->height);
+ schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
+}
+
+static void virtio_gpu_3d_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct virtio_gpu_fbdev *vfbdev = info->par;
+ drm_fb_helper_sys_copyarea(info, area);
+ virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
+ area->width, area->height);
+ schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
+}
+
+static void virtio_gpu_3d_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct virtio_gpu_fbdev *vfbdev = info->par;
+ drm_fb_helper_sys_imageblit(info, image);
+ virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
+ image->width, image->height);
+ schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
+}
+
+static struct fb_ops virtio_gpufb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ .fb_fillrect = virtio_gpu_3d_fillrect,
+ .fb_copyarea = virtio_gpu_3d_copyarea,
+ .fb_imageblit = virtio_gpu_3d_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj)
+{
+ return virtio_gpu_object_kmap(obj, NULL);
+}
+
+static int virtio_gpufb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct virtio_gpu_fbdev *vfbdev =
+ container_of(helper, struct virtio_gpu_fbdev, helper);
+ struct drm_device *dev = helper->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct virtio_gpu_object *obj;
+ uint32_t resid, format, size;
+ int ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * 4;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);
+
+ switch (mode_cmd.pixel_format) {
+#ifdef __BIG_ENDIAN
+ case DRM_FORMAT_XRGB8888:
+ format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+ break;
+#else
+ case DRM_FORMAT_XRGB8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+ break;
+#endif
+ default:
+ DRM_ERROR("failed to find virtio gpu format for %d\n",
+ mode_cmd.pixel_format);
+ return -EINVAL;
+ }
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = virtio_gpu_alloc_object(dev, size, false, true);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ virtio_gpu_resource_id_get(vgdev, &resid);
+ virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ mode_cmd.width, mode_cmd.height);
+
+ ret = virtio_gpu_vmap_fb(vgdev, obj);
+ if (ret) {
+ DRM_ERROR("failed to vmap fb %d\n", ret);
+ goto err_obj_vmap;
+ }
+
+ /* attach the object to the resource */
+ ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+ if (ret)
+ goto err_obj_attach;
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_fb_alloc;
+ }
+
+ info->par = helper;
+
+ ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
+ &mode_cmd, &obj->gem_base);
+ if (ret)
+ goto err_fb_init;
+
+ fb = &vfbdev->vgfb.base;
+
+ vfbdev->helper.fb = fb;
+
+ strcpy(info->fix.id, "virtiodrmfb");
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &virtio_gpufb_ops;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ info->screen_base = obj->vmap;
+ info->screen_size = obj->gem_base.size;
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &vfbdev->helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ return 0;
+
+err_fb_init:
+ drm_fb_helper_release_fbi(helper);
+err_fb_alloc:
+ virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
+err_obj_attach:
+err_obj_vmap:
+ virtio_gpu_gem_free_object(&obj->gem_base);
+ return ret;
+}
+
+static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
+ struct virtio_gpu_fbdev *vgfbdev)
+{
+ struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
+
+ drm_fb_helper_unregister_fbi(&vgfbdev->helper);
+ drm_fb_helper_release_fbi(&vgfbdev->helper);
+
+ if (vgfb->obj)
+ vgfb->obj = NULL;
+ drm_fb_helper_fini(&vgfbdev->helper);
+ drm_framebuffer_cleanup(&vgfb->base);
+
+ return 0;
+}
+static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
+ .fb_probe = virtio_gpufb_create,
+};
+
+int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_fbdev *vgfbdev;
+ int bpp_sel = 32; /* TODO: parameter from somewhere? */
+ int ret;
+
+ vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
+ if (!vgfbdev)
+ return -ENOMEM;
+
+ vgfbdev->vgdev = vgdev;
+ vgdev->vgfbdev = vgfbdev;
+ INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);
+
+ drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
+ &virtio_gpu_fb_helper_funcs);
+ ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
+ vgdev->num_scanouts,
+ VIRTIO_GPUFB_CONN_LIMIT);
+ if (ret) {
+ kfree(vgfbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
+ drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
+ return 0;
+}
+
+void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
+{
+ if (!vgdev->vgfbdev)
+ return;
+
+ virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
+ kfree(vgdev->vgfbdev);
+ vgdev->vgfbdev = NULL;
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_fence.c b/kernel/drivers/gpu/drm/virtio/virtgpu_fence.c
new file mode 100644
index 000000000..cf4418709
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+
+static const char *virtio_get_driver_name(struct fence *f)
+{
+ return "virtio_gpu";
+}
+
+static const char *virtio_get_timeline_name(struct fence *f)
+{
+ return "controlq";
+}
+
+static bool virtio_enable_signaling(struct fence *f)
+{
+ return true;
+}
+
+static bool virtio_signaled(struct fence *f)
+{
+ struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+ if (atomic64_read(&fence->drv->last_seq) >= fence->seq)
+ return true;
+ return false;
+}
+
+static void virtio_fence_value_str(struct fence *f, char *str, int size)
+{
+ struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+ snprintf(str, size, "%llu", fence->seq);
+}
+
+static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+{
+ struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+ snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
+}
+
+static const struct fence_ops virtio_fence_ops = {
+ .get_driver_name = virtio_get_driver_name,
+ .get_timeline_name = virtio_get_timeline_name,
+ .enable_signaling = virtio_enable_signaling,
+ .signaled = virtio_signaled,
+ .wait = fence_default_wait,
+ .fence_value_str = virtio_fence_value_str,
+ .timeline_value_str = virtio_timeline_value_str,
+};
+
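+/*
+ * Attach a fence to an outgoing control-queue command: allocate a fence,
+ * take the next sequence number under the fence driver lock, queue it on
+ * drv->fences, and flag the command header so the host reports completion
+ * with this fence id.
+ */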
+int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_ctrl_hdr *cmd_hdr,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ unsigned long irq_flags;
+
+ *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
+ if ((*fence) == NULL)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&drv->lock, irq_flags);
+ (*fence)->drv = drv;
+ (*fence)->seq = ++drv->sync_seq;
+ fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+ 0, (*fence)->seq);
+ fence_get(&(*fence)->f);
+ list_add_tail(&(*fence)->node, &drv->fences);
+ spin_unlock_irqrestore(&drv->lock, irq_flags);
+
+ cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
+ cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+ return 0;
+}
+
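+/*
+ * Called when the host acknowledges fenced commands: record the newest
+ * completed sequence number, then signal and drop every pending fence
+ * whose sequence is not newer than it.
+ */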
+void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
+ u64 last_seq)
+{
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct virtio_gpu_fence *fence, *tmp;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&drv->lock, irq_flags);
+ atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
+ list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
+ if (last_seq < fence->seq)
+ continue;
+ fence_signal_locked(&fence->f);
+ list_del(&fence->node);
+ fence_put(&fence->f);
+ }
+ spin_unlock_irqrestore(&drv->lock, irq_flags);
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_gem.c b/kernel/drivers/gpu/drm/virtio/virtgpu_gem.c
new file mode 100644
index 000000000..1feb7cee3
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+
+void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
+
+ if (obj)
+ virtio_gpu_object_unref(&obj);
+}
+
+struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
+ size_t size, bool kernel,
+ bool pinned)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object *obj;
+ int ret;
+
+ ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return obj;
+}
+
+int virtio_gpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p)
+{
+ struct virtio_gpu_object *obj;
+ int ret;
+ u32 handle;
+
+ obj = virtio_gpu_alloc_object(dev, size, false, false);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+ if (ret) {
+ drm_gem_object_release(&obj->gem_base);
+ return ret;
+ }
+
+ *obj_p = &obj->gem_base;
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(&obj->gem_base);
+
+ *handle_p = handle;
+ return 0;
+}
+
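+/*
+ * Dumb buffer creation: compute the pitch and page-aligned size, allocate
+ * a GEM object, create a matching 2D host resource (hard-coded format 2,
+ * B8G8R8X8 in the virtio-gpu protocol) and attach the object's backing
+ * pages to it.
+ */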
+int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ struct virtio_gpu_object *obj;
+ int ret;
+ uint32_t pitch;
+ uint32_t resid;
+
+ pitch = args->width * ((args->bpp + 1) / 8);
+ args->size = pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
+ &args->handle);
+ if (ret)
+ goto fail;
+
+ virtio_gpu_resource_id_get(vgdev, &resid);
+ virtio_gpu_cmd_create_resource(vgdev, resid,
+ 2, args->width, args->height);
+
+ /* attach the object to the resource */
+ obj = gem_to_virtio_gpu_obj(gobj);
+ ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+ if (ret)
+ goto fail;
+
+ obj->dumb = true;
+ args->pitch = pitch;
+ return ret;
+
+fail:
+ return ret;
+}
+
+int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ struct drm_gem_object *gobj;
+ struct virtio_gpu_object *obj;
+
+ BUG_ON(!offset_p);
+ gobj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gobj == NULL)
+ return -ENOENT;
+ obj = gem_to_virtio_gpu_obj(gobj);
+ *offset_p = virtio_gpu_object_mmap_offset(obj);
+ drm_gem_object_unreference_unlocked(gobj);
+ return 0;
+}
+
+int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
+ int r;
+
+ if (!vgdev->has_virgl_3d)
+ return 0;
+
+ r = virtio_gpu_object_reserve(qobj, false);
+ if (r)
+ return r;
+
+ virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
+ qobj->hw_res_handle);
+ virtio_gpu_object_unreserve(qobj);
+ return 0;
+}
+
+void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
+ int r;
+
+ if (!vgdev->has_virgl_3d)
+ return;
+
+ r = virtio_gpu_object_reserve(qobj, false);
+ if (r)
+ return;
+
+ virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
+ qobj->hw_res_handle);
+ virtio_gpu_object_unreserve(qobj);
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/kernel/drivers/gpu/drm/virtio/virtgpu_ioctl.c
new file mode 100644
index 000000000..b4de18e65
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie
+ * Alon Levy
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+#include <drm/virtgpu_drm.h>
+#include "ttm/ttm_execbuf_util.h"
+
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+ const struct drm_virtgpu_3d_box *src)
+{
+ dst->x = cpu_to_le32(src->x);
+ dst->y = cpu_to_le32(src->y);
+ dst->z = cpu_to_le32(src->z);
+ dst->w = cpu_to_le32(src->w);
+ dst->h = cpu_to_le32(src->h);
+ dst->d = cpu_to_le32(src->d);
+}
+
+static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_virtgpu_map *virtio_gpu_map = data;
+
+ return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+ virtio_gpu_map->handle,
+ &virtio_gpu_map->offset);
+}
+
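+/*
+ * Reserve every buffer on the validate list through the TTM execbuf
+ * utilities, then validate each one into its placement; back off all
+ * reservations on any failure.
+ */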
+static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head)
+{
+ struct ttm_validate_buffer *buf;
+ struct ttm_buffer_object *bo;
+ struct virtio_gpu_object *qobj;
+ int ret;
+
+ ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+ if (ret != 0)
+ return ret;
+
+ list_for_each_entry(buf, head, head) {
+ bo = buf->bo;
+ qobj = container_of(bo, struct virtio_gpu_object, tbo);
+ ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+ if (ret) {
+ ttm_eu_backoff_reservation(ticket, head);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void virtio_gpu_unref_list(struct list_head *head)
+{
+ struct ttm_validate_buffer *buf;
+ struct ttm_buffer_object *bo;
+ struct virtio_gpu_object *qobj;
+ list_for_each_entry(buf, head, head) {
+ bo = buf->bo;
+ qobj = container_of(bo, struct virtio_gpu_object, tbo);
+
+ drm_gem_object_unreference_unlocked(&qobj->gem_base);
+ }
+}
+
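+/*
+ * 3D command submission: copy the BO handle list from userspace, look up
+ * and reserve the corresponding objects, copy the command stream, submit
+ * it on the control queue with a fence, and fence the reserved buffers so
+ * they remain busy until the host has processed the commands.
+ */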
+static int virtio_gpu_execbuffer(struct drm_device *dev,
+ struct drm_virtgpu_execbuffer *exbuf,
+ struct drm_file *drm_file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct drm_gem_object *gobj;
+ struct virtio_gpu_fence *fence;
+ struct virtio_gpu_object *qobj;
+ int ret;
+ uint32_t *bo_handles = NULL;
+ void __user *user_bo_handles = NULL;
+ struct list_head validate_list;
+ struct ttm_validate_buffer *buflist = NULL;
+ int i;
+ struct ww_acquire_ctx ticket;
+ void *buf;
+
+ if (!vgdev->has_virgl_3d)
+ return -ENOSYS;
+
+ INIT_LIST_HEAD(&validate_list);
+ if (exbuf->num_bo_handles) {
+
+ bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
+ sizeof(uint32_t));
+ buflist = drm_calloc_large(exbuf->num_bo_handles,
+ sizeof(struct ttm_validate_buffer));
+ if (!bo_handles || !buflist) {
+ drm_free_large(bo_handles);
+ drm_free_large(buflist);
+ return -ENOMEM;
+ }
+
+ user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+ if (copy_from_user(bo_handles, user_bo_handles,
+ exbuf->num_bo_handles * sizeof(uint32_t))) {
+ ret = -EFAULT;
+ drm_free_large(bo_handles);
+ drm_free_large(buflist);
+ return ret;
+ }
+
+ for (i = 0; i < exbuf->num_bo_handles; i++) {
+ gobj = drm_gem_object_lookup(dev,
+ drm_file, bo_handles[i]);
+ if (!gobj) {
+ drm_free_large(bo_handles);
+ drm_free_large(buflist);
+ return -ENOENT;
+ }
+
+ qobj = gem_to_virtio_gpu_obj(gobj);
+ buflist[i].bo = &qobj->tbo;
+
+ list_add(&buflist[i].head, &validate_list);
+ }
+ drm_free_large(bo_handles);
+ }
+
+ ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+ if (ret)
+ goto out_free;
+
+ buf = kmalloc(exbuf->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_unresv;
+ }
+ if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
+ exbuf->size)) {
+ kfree(buf);
+ ret = -EFAULT;
+ goto out_unresv;
+ }
+ virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
+ vfpriv->ctx_id, &fence);
+
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+
+ /* fence the command bo */
+ virtio_gpu_unref_list(&validate_list);
+ drm_free_large(buflist);
+ fence_put(&fence->f);
+ return 0;
+
+out_unresv:
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
+out_free:
+ virtio_gpu_unref_list(&validate_list);
+ drm_free_large(buflist);
+ return ret;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full VIRTIO_GPUDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
+ */
+static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_virtgpu_execbuffer *execbuffer = data;
+
+ return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
+}
+
+static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_virtgpu_getparam *param = data;
+ int value;
+
+ switch (param->param) {
+ case VIRTGPU_PARAM_3D_FEATURES:
+ value = vgdev->has_virgl_3d ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (copy_to_user((void __user *)(unsigned long)param->value,
+ &value, sizeof(int))) {
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_virtgpu_resource_create *rc = data;
+ int ret;
+ uint32_t res_id;
+ struct virtio_gpu_object *qobj;
+ struct drm_gem_object *obj;
+ uint32_t handle = 0;
+ uint32_t size;
+ struct list_head validate_list;
+ struct ttm_validate_buffer mainbuf;
+ struct virtio_gpu_fence *fence = NULL;
+ struct ww_acquire_ctx ticket;
+ struct virtio_gpu_resource_create_3d rc_3d;
+
+ if (!vgdev->has_virgl_3d) {
+ if (rc->depth > 1)
+ return -EINVAL;
+ if (rc->nr_samples > 1)
+ return -EINVAL;
+ if (rc->last_level > 1)
+ return -EINVAL;
+ if (rc->target != 2)
+ return -EINVAL;
+ if (rc->array_size > 1)
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&validate_list);
+ memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+ virtio_gpu_resource_id_get(vgdev, &res_id);
+
+ size = rc->size;
+
+ /* allocate a single page size object */
+ if (size == 0)
+ size = PAGE_SIZE;
+
+ qobj = virtio_gpu_alloc_object(dev, size, false, false);
+ if (IS_ERR(qobj)) {
+ ret = PTR_ERR(qobj);
+ goto fail_id;
+ }
+ obj = &qobj->gem_base;
+
+ if (!vgdev->has_virgl_3d) {
+ virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+ rc->width, rc->height);
+
+ ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
+ } else {
+ /* use a gem reference since unref list undoes them */
+ drm_gem_object_reference(&qobj->gem_base);
+ mainbuf.bo = &qobj->tbo;
+ list_add(&mainbuf.head, &validate_list);
+
+ ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+ if (ret) {
+ DRM_DEBUG("failed to validate\n");
+ goto fail_unref;
+ }
+
+ rc_3d.resource_id = cpu_to_le32(res_id);
+ rc_3d.target = cpu_to_le32(rc->target);
+ rc_3d.format = cpu_to_le32(rc->format);
+ rc_3d.bind = cpu_to_le32(rc->bind);
+ rc_3d.width = cpu_to_le32(rc->width);
+ rc_3d.height = cpu_to_le32(rc->height);
+ rc_3d.depth = cpu_to_le32(rc->depth);
+ rc_3d.array_size = cpu_to_le32(rc->array_size);
+ rc_3d.last_level = cpu_to_le32(rc->last_level);
+ rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
+ rc_3d.flags = cpu_to_le32(rc->flags);
+
+ virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
+ ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
+ if (ret) {
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
+ goto fail_unref;
+ }
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+ }
+
+ qobj->hw_res_handle = res_id;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ if (ret) {
+
+ drm_gem_object_release(obj);
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_unref_list(&validate_list);
+ fence_put(&fence->f);
+ }
+ return ret;
+ }
+ drm_gem_object_unreference_unlocked(obj);
+
+ rc->res_handle = res_id; /* similar to a VM address */
+ rc->bo_handle = handle;
+
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_unref_list(&validate_list);
+ fence_put(&fence->f);
+ }
+ return 0;
+fail_unref:
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_unref_list(&validate_list);
+ fence_put(&fence->f);
+ }
+//fail_obj:
+// drm_gem_object_handle_unreference_unlocked(obj);
+fail_id:
+ virtio_gpu_resource_id_put(vgdev, res_id);
+ return ret;
+}
+
+static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_virtgpu_resource_info *ri = data;
+ struct drm_gem_object *gobj = NULL;
+ struct virtio_gpu_object *qobj = NULL;
+
+ gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
+ if (gobj == NULL)
+ return -ENOENT;
+
+ qobj = gem_to_virtio_gpu_obj(gobj);
+
+ ri->size = qobj->gem_base.size;
+ ri->res_handle = qobj->hw_res_handle;
+ drm_gem_object_unreference_unlocked(gobj);
+ return 0;
+}
+
+static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct drm_gem_object *gobj = NULL;
+ struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_fence *fence;
+ int ret;
+ u32 offset = args->offset;
+ struct virtio_gpu_box box;
+
+ if (!vgdev->has_virgl_3d)
+ return -ENOSYS;
+
+ gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+ if (gobj == NULL)
+ return -ENOENT;
+
+ qobj = gem_to_virtio_gpu_obj(gobj);
+
+ ret = virtio_gpu_object_reserve(qobj, false);
+ if (ret)
+ goto out;
+
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+ true, false);
+ if (unlikely(ret))
+ goto out_unres;
+
+ convert_to_hw_box(&box, &args->box);
+ virtio_gpu_cmd_transfer_from_host_3d
+ (vgdev, qobj->hw_res_handle,
+ vfpriv->ctx_id, offset, args->level,
+ &box, &fence);
+ reservation_object_add_excl_fence(qobj->tbo.resv,
+ &fence->f);
+
+ fence_put(&fence->f);
+out_unres:
+ virtio_gpu_object_unreserve(qobj);
+out:
+ drm_gem_object_unreference_unlocked(gobj);
+ return ret;
+}
+
+static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct drm_gem_object *gobj = NULL;
+ struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_fence *fence;
+ struct virtio_gpu_box box;
+ int ret;
+ u32 offset = args->offset;
+
+ gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+ if (gobj == NULL)
+ return -ENOENT;
+
+ qobj = gem_to_virtio_gpu_obj(gobj);
+
+ ret = virtio_gpu_object_reserve(qobj, false);
+ if (ret)
+ goto out;
+
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+ true, false);
+ if (unlikely(ret))
+ goto out_unres;
+
+ convert_to_hw_box(&box, &args->box);
+ if (!vgdev->has_virgl_3d) {
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, qobj->hw_res_handle, offset,
+ box.w, box.h, box.x, box.y, NULL);
+ } else {
+ virtio_gpu_cmd_transfer_to_host_3d
+ (vgdev, qobj->hw_res_handle,
+ vfpriv ? vfpriv->ctx_id : 0, offset,
+ args->level, &box, &fence);
+ reservation_object_add_excl_fence(qobj->tbo.resv,
+ &fence->f);
+ fence_put(&fence->f);
+ }
+
+out_unres:
+ virtio_gpu_object_unreserve(qobj);
+out:
+ drm_gem_object_unreference_unlocked(gobj);
+ return ret;
+}
+
+static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_virtgpu_3d_wait *args = data;
+ struct drm_gem_object *gobj = NULL;
+ struct virtio_gpu_object *qobj = NULL;
+ int ret;
+ bool nowait = false;
+
+ gobj = drm_gem_object_lookup(dev, file, args->handle);
+ if (gobj == NULL)
+ return -ENOENT;
+
+ qobj = gem_to_virtio_gpu_obj(gobj);
+
+ if (args->flags & VIRTGPU_WAIT_NOWAIT)
+ nowait = true;
+ ret = virtio_gpu_object_wait(qobj, nowait);
+
+ drm_gem_object_unreference_unlocked(gobj);
+ return ret;
+}
+
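+/*
+ * Return a capability set blob to userspace. Results are cached per
+ * (capset id, version); on a cache miss the host is asked for the data
+ * and the ioctl waits up to five seconds for the response.
+ */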
+static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_virtgpu_get_caps *args = data;
+ int size;
+ int i;
+ int found_valid = -1;
+ int ret;
+ struct virtio_gpu_drv_cap_cache *cache_ent;
+ void *ptr;
+
+ if (vgdev->num_capsets == 0)
+ return -ENOSYS;
+
+ spin_lock(&vgdev->display_info_lock);
+ for (i = 0; i < vgdev->num_capsets; i++) {
+ if (vgdev->capsets[i].id == args->cap_set_id) {
+ if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
+ found_valid = i;
+ break;
+ }
+ }
+ }
+
+ if (found_valid == -1) {
+ spin_unlock(&vgdev->display_info_lock);
+ return -EINVAL;
+ }
+
+ size = vgdev->capsets[found_valid].max_size;
+ if (args->size > size) {
+ spin_unlock(&vgdev->display_info_lock);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
+ if (cache_ent->id == args->cap_set_id &&
+ cache_ent->version == args->cap_set_ver) {
+ ptr = cache_ent->caps_cache;
+ spin_unlock(&vgdev->display_info_lock);
+ goto copy_exit;
+ }
+ }
+ spin_unlock(&vgdev->display_info_lock);
+
+ /* not in cache - need to talk to hw */
+ virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
+ &cache_ent);
+
+ ret = wait_event_timeout(vgdev->resp_wq,
+ atomic_read(&cache_ent->is_valid), 5 * HZ);
+
+ ptr = cache_ent->caps_cache;
+
+copy_exit:
+ if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
+ DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
+ virtio_gpu_resource_create_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ /* make transfers async to the main ring? - not sure, can we
+ thread these in the underlying GL */
+ DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
+ virtio_gpu_transfer_from_host_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
+ virtio_gpu_transfer_to_host_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+};
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_kms.c b/kernel/drivers/gpu/drm/virtio/virtgpu_kms.c
new file mode 100644
index 000000000..06496a128
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+
+static int virtio_gpu_fbdev = 1;
+
+MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console");
+module_param_named(fbdev, virtio_gpu_fbdev, int, 0400);
+
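+/*
+ * Handle a config-space change notification: when the host signals a
+ * display event, re-query the display info and raise a hotplug event,
+ * then acknowledge by writing the handled bits back to events_clear.
+ */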
+static void virtio_gpu_config_changed_work_func(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device,
+ config_changed_work);
+ u32 events_read, events_clear = 0;
+
+ /* read the config space */
+ virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+ events_read, &events_read);
+ if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+ virtio_gpu_cmd_get_display_info(vgdev);
+ drm_helper_hpd_irq_event(vgdev->ddev);
+ events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
+ }
+ virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
+ events_clear, &events_clear);
+}
+
+static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid)
+{
+ int handle;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&vgdev->ctx_id_idr_lock);
+ handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
+ spin_unlock(&vgdev->ctx_id_idr_lock);
+ idr_preload_end();
+ *resid = handle;
+}
+
+static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+ spin_lock(&vgdev->ctx_id_idr_lock);
+ idr_remove(&vgdev->ctx_id_idr, id);
+ spin_unlock(&vgdev->ctx_id_idr_lock);
+}
+
+static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+ uint32_t nlen, const char *name,
+ uint32_t *ctx_id)
+{
+ virtio_gpu_ctx_id_get(vgdev, ctx_id);
+ virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+}
+
+static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
+ uint32_t ctx_id)
+{
+ virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+ virtio_gpu_ctx_id_put(vgdev, ctx_id);
+}
+
+static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
+ void (*work_func)(struct work_struct *work))
+{
+ spin_lock_init(&vgvq->qlock);
+ init_waitqueue_head(&vgvq->ack_queue);
+ INIT_WORK(&vgvq->dequeue_work, work_func);
+}
+
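+/*
+ * Query each capability set advertised in the config space and cache its
+ * id, maximum version and maximum size; give up (and free the array) if
+ * the host does not answer within five seconds.
+ */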
+static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
+ int num_capsets)
+{
+ int i, ret;
+
+ vgdev->capsets = kcalloc(num_capsets,
+ sizeof(struct virtio_gpu_drv_capset),
+ GFP_KERNEL);
+ if (!vgdev->capsets) {
+ DRM_ERROR("failed to allocate cap sets\n");
+ return;
+ }
+ for (i = 0; i < num_capsets; i++) {
+ virtio_gpu_cmd_get_capset_info(vgdev, i);
+ ret = wait_event_timeout(vgdev->resp_wq,
+ vgdev->capsets[i].id > 0, 5 * HZ);
+ if (ret == 0) {
+ DRM_ERROR("timed out waiting for cap set %d\n", i);
+ kfree(vgdev->capsets);
+ vgdev->capsets = NULL;
+ return;
+ }
+ DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
+ i, vgdev->capsets[i].id,
+ vgdev->capsets[i].max_version,
+ vgdev->capsets[i].max_size);
+ }
+ vgdev->num_capsets = num_capsets;
+}
+
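+/*
+ * Device probe path: require VIRTIO_F_VERSION_1, set up the control and
+ * cursor virtqueues, preallocate vbuffers, initialize TTM, read the
+ * number of scanouts and capability sets from the config space, then
+ * bring up modesetting and (optionally) fbdev emulation.
+ */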
+int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ static vq_callback_t *callbacks[] = {
+ virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+ };
+ static const char *names[] = { "control", "cursor" };
+
+ struct virtio_gpu_device *vgdev;
+ /* this will expand later */
+ struct virtqueue *vqs[2];
+ u32 num_scanouts, num_capsets;
+ int ret;
+
+ if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
+ if (!vgdev)
+ return -ENOMEM;
+
+ vgdev->ddev = dev;
+ dev->dev_private = vgdev;
+ vgdev->vdev = dev->virtdev;
+ vgdev->dev = dev->dev;
+
+ spin_lock_init(&vgdev->display_info_lock);
+ spin_lock_init(&vgdev->ctx_id_idr_lock);
+ idr_init(&vgdev->ctx_id_idr);
+ spin_lock_init(&vgdev->resource_idr_lock);
+ idr_init(&vgdev->resource_idr);
+ init_waitqueue_head(&vgdev->resp_wq);
+ virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
+ virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
+
+ spin_lock_init(&vgdev->fence_drv.lock);
+ INIT_LIST_HEAD(&vgdev->fence_drv.fences);
+ INIT_LIST_HEAD(&vgdev->cap_cache);
+ INIT_WORK(&vgdev->config_changed_work,
+ virtio_gpu_config_changed_work_func);
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
+ vgdev->has_virgl_3d = true;
+ DRM_INFO("virgl 3d acceleration %s\n",
+ vgdev->has_virgl_3d ? "enabled" : "not available");
+
+ ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
+ callbacks, names);
+ if (ret) {
+ DRM_ERROR("failed to find virt queues\n");
+ goto err_vqs;
+ }
+ vgdev->ctrlq.vq = vqs[0];
+ vgdev->cursorq.vq = vqs[1];
+ ret = virtio_gpu_alloc_vbufs(vgdev);
+ if (ret) {
+ DRM_ERROR("failed to alloc vbufs\n");
+ goto err_vbufs;
+ }
+
+ ret = virtio_gpu_ttm_init(vgdev);
+ if (ret) {
+ DRM_ERROR("failed to init ttm %d\n", ret);
+ goto err_ttm;
+ }
+
+ /* get display info */
+ virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+ num_scanouts, &num_scanouts);
+ vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
+ VIRTIO_GPU_MAX_SCANOUTS);
+ if (!vgdev->num_scanouts) {
+ DRM_ERROR("num_scanouts is zero\n");
+ ret = -EINVAL;
+ goto err_scanouts;
+ }
+ DRM_INFO("number of scanouts: %d\n", num_scanouts);
+
+ virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+ num_capsets, &num_capsets);
+ DRM_INFO("number of cap sets: %d\n", num_capsets);
+
+ ret = virtio_gpu_modeset_init(vgdev);
+ if (ret)
+ goto err_modeset;
+
+ virtio_device_ready(vgdev->vdev);
+ vgdev->vqs_ready = true;
+
+ if (num_capsets)
+ virtio_gpu_get_capsets(vgdev, num_capsets);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+ 5 * HZ);
+ if (virtio_gpu_fbdev)
+ virtio_gpu_fbdev_init(vgdev);
+
+ return 0;
+
+err_modeset:
+err_scanouts:
+ virtio_gpu_ttm_fini(vgdev);
+err_ttm:
+ virtio_gpu_free_vbufs(vgdev);
+err_vbufs:
+ vgdev->vdev->config->del_vqs(vgdev->vdev);
+err_vqs:
+ kfree(vgdev);
+ return ret;
+}
+
+static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;
+
+ list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
+ kfree(cache_ent->caps_cache);
+ kfree(cache_ent);
+ }
+}
+
+int virtio_gpu_driver_unload(struct drm_device *dev)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+
+ vgdev->vqs_ready = false;
+ flush_work(&vgdev->ctrlq.dequeue_work);
+ flush_work(&vgdev->cursorq.dequeue_work);
+ flush_work(&vgdev->config_changed_work);
+ vgdev->vdev->config->del_vqs(vgdev->vdev);
+
+ virtio_gpu_modeset_fini(vgdev);
+ virtio_gpu_ttm_fini(vgdev);
+ virtio_gpu_free_vbufs(vgdev);
+ virtio_gpu_cleanup_cap_cache(vgdev);
+ kfree(vgdev->capsets);
+ kfree(vgdev);
+ return 0;
+}
+
+int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv;
+ uint32_t id;
+ char dbgname[64], tmpname[TASK_COMM_LEN];
+
+ /* can't create contexts without 3d renderer */
+ if (!vgdev->has_virgl_3d)
+ return 0;
+
+ get_task_comm(tmpname, current);
+ snprintf(dbgname, sizeof(dbgname), "%s", tmpname);
+ dbgname[63] = 0;
+ /* allocate a virt GPU context for this opener */
+ vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
+ if (!vfpriv)
+ return -ENOMEM;
+
+ virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+
+ vfpriv->ctx_id = id;
+ file->driver_priv = vfpriv;
+ return 0;
+}
+
+void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv;
+
+ if (!vgdev->has_virgl_3d)
+ return;
+
+ vfpriv = file->driver_priv;
+
+ virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+ kfree(vfpriv);
+ file->driver_priv = NULL;
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_object.c b/kernel/drivers/gpu/drm/virtio/virtgpu_object.c
new file mode 100644
index 000000000..f300eba95
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+ struct virtio_gpu_object *bo;
+ struct virtio_gpu_device *vgdev;
+
+ bo = container_of(tbo, struct virtio_gpu_object, tbo);
+ vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+
+ if (bo->hw_res_handle)
+ virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
+ if (bo->pages)
+ virtio_gpu_object_free_sg_table(bo);
+ drm_gem_object_release(&bo->gem_base);
+ kfree(bo);
+}
+
+static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
+ bool pinned)
+{
+ u32 c = 1;
+ u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+
+ vgbo->placement.placement = &vgbo->placement_code;
+ vgbo->placement.busy_placement = &vgbo->placement_code;
+ vgbo->placement_code.fpfn = 0;
+ vgbo->placement_code.lpfn = 0;
+ vgbo->placement_code.flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+ vgbo->placement.num_placement = c;
+ vgbo->placement.num_busy_placement = c;
+
+}
+
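+/*
+ * Allocate a virtio-gpu buffer object: a GEM object backed by a TTM BO
+ * placed in TT (guest system memory), optionally pinned so it cannot be
+ * evicted.
+ */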
+int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ unsigned long size, bool kernel, bool pinned,
+ struct virtio_gpu_object **bo_ptr)
+{
+ struct virtio_gpu_object *bo;
+ enum ttm_bo_type type;
+ size_t acc_size;
+ int ret;
+
+ if (kernel)
+ type = ttm_bo_type_kernel;
+ else
+ type = ttm_bo_type_device;
+ *bo_ptr = NULL;
+
+ acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+ sizeof(struct virtio_gpu_object));
+
+ bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+ size = roundup(size, PAGE_SIZE);
+ ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
+ if (ret != 0) {
+ kfree(bo);
+ return ret;
+ }
+ bo->dumb = false;
+ virtio_gpu_init_ttm_placement(bo, pinned);
+
+ ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, 0, !kernel, NULL, acc_size,
+ NULL, NULL, &virtio_gpu_ttm_bo_destroy);
+ /* ttm_bo_init failure will call the destroy */
+ if (ret != 0)
+ return ret;
+
+ *bo_ptr = bo;
+ return 0;
+}
+
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
+{
+ bool is_iomem;
+ int r;
+
+ if (bo->vmap) {
+ if (ptr)
+ *ptr = bo->vmap;
+ return 0;
+ }
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ if (r)
+ return r;
+ bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+ if (ptr)
+ *ptr = bo->vmap;
+ return 0;
+}
+
+int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+ struct virtio_gpu_object *bo)
+{
+ int ret;
+ struct page **pages = bo->tbo.ttm->pages;
+ int nr_pages = bo->tbo.num_pages;
+
+ /* wtf swapping */
+ if (bo->pages)
+ return 0;
+
+ if (bo->tbo.ttm->state == tt_unpopulated)
+ bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+ bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!bo->pages)
+ goto out;
+
+ ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ kfree(bo->pages);
+ bo->pages = NULL;
+ return -ENOMEM;
+}
+
+void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
+{
+ sg_free_table(bo->pages);
+ kfree(bo->pages);
+ bo->pages = NULL;
+}
+
+int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ if (unlikely(r != 0))
+ return r;
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
+
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_plane.c b/kernel/drivers/gpu/drm/virtio/virtgpu_plane.c
new file mode 100644
index 000000000..4a74129c5
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "virtgpu_drv.h"
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static const uint32_t virtio_gpu_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static void virtio_gpu_plane_destroy(struct drm_plane *plane)
+{
+ kfree(plane);
+}
+
+static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = virtio_gpu_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
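+/*
+ * Atomic plane update: point the output's scanout at the host resource
+ * backing the new framebuffer (handle 0 disables the scanout), using the
+ * plane's current CRTC coordinates.
+ */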
+static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc);
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+ uint32_t handle;
+
+ if (plane->fb) {
+ vgfb = to_virtio_gpu_framebuffer(plane->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ handle = bo->hw_res_handle;
+ } else {
+ handle = 0;
+ }
+
+ DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle,
+ plane->state->crtc_w, plane->state->crtc_h,
+ plane->state->crtc_x, plane->state->crtc_y);
+ virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->crtc_x,
+ plane->state->crtc_y);
+}
+
+
+static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = {
+ .atomic_check = virtio_gpu_plane_atomic_check,
+ .atomic_update = virtio_gpu_plane_atomic_update,
+};
+
+struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ int index)
+{
+ struct drm_device *dev = vgdev->ddev;
+ struct drm_plane *plane;
+ int ret;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_universal_plane_init(dev, plane, 1 << index,
+ &virtio_gpu_plane_funcs,
+ virtio_gpu_formats,
+ ARRAY_SIZE(virtio_gpu_formats),
+ DRM_PLANE_TYPE_PRIMARY);
+ if (ret)
+ goto err_plane_init;
+
+ drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs);
+ return plane;
+
+err_plane_init:
+ kfree(plane);
+ return ERR_PTR(ret);
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_prime.c b/kernel/drivers/gpu/drm/virtio/virtgpu_prime.c
new file mode 100644
index 000000000..385e0eb98
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "virtgpu_drv.h"
+
+/* Empty implementations, as there should not be any other driver for a
+ * virtual device that might share buffers with virtgpu. */
+
+int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENODEV;
+}
+
+void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
+}
+
+void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
+}
+
+void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *area)
+{
+ return -ENODEV;
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_ttm.c b/kernel/drivers/gpu/drm/virtio/virtgpu_ttm.c
new file mode 100644
index 000000000..9fd924cd2
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie
+ * Alon Levy
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/virtgpu_drm.h>
+#include "virtgpu_drv.h"
+
+#include <linux/delay.h>
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static struct
+virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
+{
+ struct virtio_gpu_mman *mman;
+ struct virtio_gpu_device *vgdev;
+
+ mman = container_of(bdev, struct virtio_gpu_mman, bdev);
+ vgdev = container_of(mman, struct virtio_gpu_device, mman);
+ return vgdev;
+}
+
+static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ vgdev->mman.mem_global_referenced = false;
+ global_ref = &vgdev->mman.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &virtio_gpu_ttm_mem_global_init;
+ global_ref->release = &virtio_gpu_ttm_mem_global_release;
+
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ vgdev->mman.bo_global_ref.mem_glob =
+ vgdev->mman.mem_global_ref.object;
+ global_ref = &vgdev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&vgdev->mman.mem_global_ref);
+ return r;
+ }
+
+ vgdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
+{
+ if (vgdev->mman.mem_global_referenced) {
+ drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
+ drm_global_item_unref(&vgdev->mman.mem_global_ref);
+ vgdev->mman.mem_global_referenced = false;
+ }
+}
+
+#if 0
+/*
+ * Hmm, seems to not do anything useful. Leftover debug hack?
+ * Something like printing pagefaults to kernel log?
+ */
+static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo;
+ struct virtio_gpu_device *vgdev;
+ int r;
+
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ if (bo == NULL)
+ return VM_FAULT_NOPAGE;
+ vgdev = virtio_gpu_get_vgdev(bo->bdev);
+ r = ttm_vm_ops->fault(vma, vmf);
+ return r;
+}
+#endif
+
+int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct virtio_gpu_device *vgdev;
+ int r;
+
+ file_priv = filp->private_data;
+ vgdev = file_priv->minor->dev->dev_private;
+ if (vgdev == NULL) {
+ DRM_ERROR(
+ "filp->private_data->minor->dev->dev_private == NULL\n");
+ return -EINVAL;
+ }
+ r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
+#if 0
+ if (unlikely(r != 0))
+ return r;
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
+ virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
+ }
+ vma->vm_ops = &virtio_gpu_ttm_vm_ops;
+ return 0;
+#else
+ return r;
+#endif
+}
+
+static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
+ uint32_t flags)
+{
+ return 0;
+}
+
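+/*
+ * Dummy TTM memory manager: every object lives in guest system memory,
+ * so "allocating" a node only needs to mark mm_node as in use; there is
+ * no real address space to manage.
+ */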
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ mem->mm_node = (void *)1;
+ return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ mem->mm_node = (void *)NULL;
+ return;
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+ return 0;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+}
+
+static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
+ ttm_bo_man_init,
+ ttm_bo_man_takedown,
+ ttm_bo_man_get_node,
+ ttm_bo_man_put_node,
+ ttm_bo_man_debug
+};
+
+static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct virtio_gpu_device *vgdev;
+
+ vgdev = virtio_gpu_get_vgdev(bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_TT:
+ man->func = &virtio_gpu_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ static struct ttm_place placements = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
+ };
+
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+ placement->num_busy_placement = 1;
+ return;
+}
+
+static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ case TTM_PL_TT:
+ /* system memory */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct virtio_gpu_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct virtio_gpu_device *vgdev;
+ u64 offset;
+};
+
+static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+
+ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ if (!ttm->num_pages)
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ ttm->num_pages, bo_mem, ttm);
+
+ /* Not implemented */
+ return 0;
+}
+
+static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+ struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+}
+
+static struct ttm_backend_func virtio_gpu_backend_func = {
+ .bind = &virtio_gpu_ttm_backend_bind,
+ .unbind = &virtio_gpu_ttm_backend_unbind,
+ .destroy = &virtio_gpu_ttm_backend_destroy,
+};
+
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ return ttm_pool_populate(ttm);
+}
+
+static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct virtio_gpu_device *vgdev;
+ struct virtio_gpu_ttm_tt *gtt;
+
+ vgdev = virtio_gpu_get_vgdev(bdev);
+ gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
+ if (gtt == NULL)
+ return NULL;
+ gtt->ttm.ttm.func = &virtio_gpu_backend_func;
+ gtt->vgdev = vgdev;
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+ dummy_read_page)) {
+ kfree(gtt);
+ return NULL;
+ }
+ return &gtt->ttm.ttm;
+}
+
+static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ BUG_ON(old_mem->mm_node != NULL);
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+}
+
+static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ virtio_gpu_move_null(bo, new_mem);
+ return 0;
+}
+
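+/*
+ * Keep the host resource in sync with the BO's placement: when a BO is
+ * moved to system placement (or has no new placement) its backing pages
+ * are detached from the host resource; when it is moved into TT they are
+ * re-attached.
+ */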
+static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
+ struct ttm_mem_reg *new_mem)
+{
+ struct virtio_gpu_object *bo;
+ struct virtio_gpu_device *vgdev;
+
+ bo = container_of(tbo, struct virtio_gpu_object, tbo);
+ vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+
+ if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
+ if (bo->hw_res_handle)
+ virtio_gpu_cmd_resource_inval_backing(vgdev,
+ bo->hw_res_handle);
+
+ } else if (new_mem->placement & TTM_PL_FLAG_TT) {
+ if (bo->hw_res_handle) {
+ virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
+ NULL);
+ }
+ }
+}
+
+static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
+{
+ struct virtio_gpu_object *bo;
+ struct virtio_gpu_device *vgdev;
+
+ bo = container_of(tbo, struct virtio_gpu_object, tbo);
+ vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+
+ if (bo->pages)
+ virtio_gpu_object_free_sg_table(bo);
+}
+
+static struct ttm_bo_driver virtio_gpu_bo_driver = {
+ .ttm_tt_create = &virtio_gpu_ttm_tt_create,
+ .ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
+ .ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
+ .invalidate_caches = &virtio_gpu_invalidate_caches,
+ .init_mem_type = &virtio_gpu_init_mem_type,
+ .evict_flags = &virtio_gpu_evict_flags,
+ .move = &virtio_gpu_bo_move,
+ .verify_access = &virtio_gpu_verify_access,
+ .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
+ .io_mem_free = &virtio_gpu_ttm_io_mem_free,
+ .move_notify = &virtio_gpu_bo_move_notify,
+ .swap_notify = &virtio_gpu_bo_swap_notify,
+};
+
+int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
+{
+ int r;
+
+ r = virtio_gpu_ttm_global_init(vgdev);
+ if (r)
+ return r;
+ /* No other users of the address space, so set it to 0 */
+ r = ttm_bo_device_init(&vgdev->mman.bdev,
+ vgdev->mman.bo_global_ref.ref.object,
+ &virtio_gpu_bo_driver,
+ vgdev->ddev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET, 0);
+ if (r) {
+ DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ goto err_dev_init;
+ }
+
+ r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
+ if (r) {
+ DRM_ERROR("Failed initializing GTT heap.\n");
+ goto err_mm_init;
+ }
+ return 0;
+
+err_mm_init:
+ ttm_bo_device_release(&vgdev->mman.bdev);
+err_dev_init:
+ virtio_gpu_ttm_global_fini(vgdev);
+ return r;
+}
+
+void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
+{
+ ttm_bo_device_release(&vgdev->mman.bdev);
+ virtio_gpu_ttm_global_fini(vgdev);
+ DRM_INFO("virtio_gpu: ttm finalized\n");
+}
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c b/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c
new file mode 100644
index 000000000..5a0f8a745
--- /dev/null
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -0,0 +1,925 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+#define MAX_INLINE_CMD_SIZE 96
+#define MAX_INLINE_RESP_SIZE 24
+#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
+ + MAX_INLINE_CMD_SIZE \
+ + MAX_INLINE_RESP_SIZE)
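+
+/*
+ * Each vbuffer has inline storage for a small command and a small response
+ * placed directly behind the struct; responses larger than
+ * MAX_INLINE_RESP_SIZE use a caller-supplied buffer instead.
+ */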
+
+void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid)
+{
+ int handle;
+
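+	/* Allocate a host-visible resource handle; start at 1 so 0 stays unused. */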
+ idr_preload(GFP_KERNEL);
+ spin_lock(&vgdev->resource_idr_lock);
+ handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
+ spin_unlock(&vgdev->resource_idr_lock);
+ idr_preload_end();
+ *resid = handle;
+}
+
+void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+ spin_lock(&vgdev->resource_idr_lock);
+ idr_remove(&vgdev->resource_idr, id);
+ spin_unlock(&vgdev->resource_idr_lock);
+}
+
+void virtio_gpu_ctrl_ack(struct virtqueue *vq)
+{
+ struct drm_device *dev = vq->vdev->priv;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ schedule_work(&vgdev->ctrlq.dequeue_work);
+}
+
+void virtio_gpu_cursor_ack(struct virtqueue *vq)
+{
+ struct drm_device *dev = vq->vdev->priv;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ schedule_work(&vgdev->cursorq.dequeue_work);
+}
+
+int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+ int i, size, count = 0;
+ void *ptr;
+
+ INIT_LIST_HEAD(&vgdev->free_vbufs);
+ spin_lock_init(&vgdev->free_vbufs_lock);
+ count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
+ count += virtqueue_get_vring_size(vgdev->cursorq.vq);
+ size = count * VBUFFER_SIZE;
+ DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
+ count, VBUFFER_SIZE, size / 1024);
+
+ vgdev->vbufs = kzalloc(size, GFP_KERNEL);
+ if (!vgdev->vbufs)
+ return -ENOMEM;
+
+ for (i = 0, ptr = vgdev->vbufs;
+ i < count;
+ i++, ptr += VBUFFER_SIZE) {
+ vbuf = ptr;
+ list_add(&vbuf->list, &vgdev->free_vbufs);
+ }
+ return 0;
+}
+
+void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+ int i, count = 0;
+
+ count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
+ count += virtqueue_get_vring_size(vgdev->cursorq.vq);
+
+ spin_lock(&vgdev->free_vbufs_lock);
+ for (i = 0; i < count; i++) {
+		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
+			spin_unlock(&vgdev->free_vbufs_lock);
+			return;
+		}
+ vbuf = list_first_entry(&vgdev->free_vbufs,
+ struct virtio_gpu_vbuffer, list);
+ list_del(&vbuf->list);
+ }
+ spin_unlock(&vgdev->free_vbufs_lock);
+ kfree(vgdev->vbufs);
+}
+
+static struct virtio_gpu_vbuffer*
+virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
+ int size, int resp_size, void *resp_buf,
+ virtio_gpu_resp_cb resp_cb)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ spin_lock(&vgdev->free_vbufs_lock);
+ BUG_ON(list_empty(&vgdev->free_vbufs));
+ vbuf = list_first_entry(&vgdev->free_vbufs,
+ struct virtio_gpu_vbuffer, list);
+ list_del(&vbuf->list);
+ spin_unlock(&vgdev->free_vbufs_lock);
+ memset(vbuf, 0, VBUFFER_SIZE);
+
+ BUG_ON(size > MAX_INLINE_CMD_SIZE);
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+
+ vbuf->resp_cb = resp_cb;
+ vbuf->resp_size = resp_size;
+ if (resp_size <= MAX_INLINE_RESP_SIZE)
+ vbuf->resp_buf = (void *)vbuf->buf + size;
+ else
+ vbuf->resp_buf = resp_buf;
+ BUG_ON(!vbuf->resp_buf);
+ return vbuf;
+}
+
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_get_vbuf(vgdev, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL, NULL);
+ if (IS_ERR(vbuf)) {
+ *vbuffer_p = NULL;
+ return ERR_CAST(vbuf);
+ }
+ *vbuffer_p = vbuf;
+ return vbuf->buf;
+}
+
+static struct virtio_gpu_update_cursor*
+virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_get_vbuf
+ (vgdev, sizeof(struct virtio_gpu_update_cursor),
+ 0, NULL, NULL);
+ if (IS_ERR(vbuf)) {
+ *vbuffer_p = NULL;
+ return ERR_CAST(vbuf);
+ }
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_update_cursor *)vbuf->buf;
+}
+
+static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ virtio_gpu_resp_cb cb,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size, int resp_size,
+ void *resp_buf)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
+ resp_size, resp_buf, cb);
+ if (IS_ERR(vbuf)) {
+ *vbuffer_p = NULL;
+ return ERR_CAST(vbuf);
+ }
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
+static void free_vbuf(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
+ kfree(vbuf->resp_buf);
+ kfree(vbuf->data_buf);
+ spin_lock(&vgdev->free_vbufs_lock);
+ list_add(&vbuf->list, &vgdev->free_vbufs);
+ spin_unlock(&vgdev->free_vbufs_lock);
+}
+
+static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+ unsigned int len;
+ int freed = 0;
+
+ while ((vbuf = virtqueue_get_buf(vq, &len))) {
+ list_add_tail(&vbuf->list, reclaim_list);
+ freed++;
+ }
+ if (freed == 0)
+		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
+}
+
+void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device,
+ ctrlq.dequeue_work);
+ struct list_head reclaim_list;
+ struct virtio_gpu_vbuffer *entry, *tmp;
+ struct virtio_gpu_ctrl_hdr *resp;
+ u64 fence_id = 0;
+
+ INIT_LIST_HEAD(&reclaim_list);
+ spin_lock(&vgdev->ctrlq.qlock);
+ do {
+ virtqueue_disable_cb(vgdev->ctrlq.vq);
+ reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
+
+ } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
+ spin_unlock(&vgdev->ctrlq.qlock);
+
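+	/*
+	 * Handle the completed buffers outside the queue lock: log unexpected
+	 * responses, track the highest fence id signalled by the host, invoke
+	 * per-buffer callbacks and return the buffers to the free list.
+	 */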
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+ if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
+ DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+ if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
+ u64 f = le64_to_cpu(resp->fence_id);
+
+ if (fence_id > f) {
+ DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
+ __func__, fence_id, f);
+ } else {
+ fence_id = f;
+ }
+ }
+ if (entry->resp_cb)
+ entry->resp_cb(vgdev, entry);
+
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
+ wake_up(&vgdev->ctrlq.ack_queue);
+
+ if (fence_id)
+ virtio_gpu_fence_event_process(vgdev, fence_id);
+}
+
+void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device,
+ cursorq.dequeue_work);
+ struct list_head reclaim_list;
+ struct virtio_gpu_vbuffer *entry, *tmp;
+
+ INIT_LIST_HEAD(&reclaim_list);
+ spin_lock(&vgdev->cursorq.qlock);
+ do {
+ virtqueue_disable_cb(vgdev->cursorq.vq);
+ reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
+ } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
+ spin_unlock(&vgdev->cursorq.qlock);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
+ wake_up(&vgdev->cursorq.ack_queue);
+}
+
+static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ struct scatterlist *sgs[3], vcmd, vout, vresp;
+ int outcnt = 0, incnt = 0;
+ int ret;
+
+ if (!vgdev->vqs_ready)
+ return -ENODEV;
+
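+	/* sg list layout: command (out), optional data (out), optional response (in) */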
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	sgs[outcnt + incnt] = &vcmd;
+ outcnt++;
+
+ if (vbuf->data_size) {
+ sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+ sgs[outcnt + incnt] = &vout;
+ outcnt++;
+ }
+
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
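+	/*
+	 * If the ring is full, drop the queue lock and wait for the host to
+	 * complete some buffers before retrying the add.
+	 */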
+retry:
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ if (ret == -ENOSPC) {
+ spin_unlock(&vgdev->ctrlq.qlock);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ spin_lock(&vgdev->ctrlq.qlock);
+ goto retry;
+ } else {
+ virtqueue_kick(vq);
+ }
+
+ if (!ret)
+ ret = vq->num_free;
+ return ret;
+}
+
+static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ int rc;
+
+ spin_lock(&vgdev->ctrlq.qlock);
+ rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+ spin_unlock(&vgdev->ctrlq.qlock);
+ return rc;
+}
+
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_ctrl_hdr *hdr,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ int rc;
+
+again:
+ spin_lock(&vgdev->ctrlq.qlock);
+
+ /*
+	 * Make sure we have enough space in the virtqueue. If not,
+	 * wait here until we do.
+	 *
+	 * Without this check virtio_gpu_queue_ctrl_buffer_locked might have
+ * to wait for free space, which can result in fence ids being
+ * submitted out-of-order.
+ */
+ if (vq->num_free < 3) {
+ spin_unlock(&vgdev->ctrlq.qlock);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+ goto again;
+ }
+
+ if (fence)
+ virtio_gpu_fence_emit(vgdev, hdr, fence);
+ rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+ spin_unlock(&vgdev->ctrlq.qlock);
+ return rc;
+}
+
+static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtqueue *vq = vgdev->cursorq.vq;
+ struct scatterlist *sgs[1], ccmd;
+ int ret;
+ int outcnt;
+
+ if (!vgdev->vqs_ready)
+ return -ENODEV;
+
+ sg_init_one(&ccmd, vbuf->buf, vbuf->size);
+ sgs[0] = &ccmd;
+ outcnt = 1;
+
+ spin_lock(&vgdev->cursorq.qlock);
+retry:
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
+ if (ret == -ENOSPC) {
+ spin_unlock(&vgdev->cursorq.qlock);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ spin_lock(&vgdev->cursorq.qlock);
+ goto retry;
+ } else {
+ virtqueue_kick(vq);
+ }
+
+ spin_unlock(&vgdev->cursorq.qlock);
+
+ if (!ret)
+ ret = vq->num_free;
+ return ret;
+}
+
+/*
+ * Just create gem objects for userspace and other long-lived objects;
+ * should the queue objects simply use dma_alloc'ed pages?
+ */
+
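+/*
+ * The helpers below fill in a little-endian command struct and queue it on
+ * the control virtqueue; the fenced variants also emit a fence with the
+ * command header so completion can be tracked.
+ */
+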
+/* create a basic resource */
+void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t format,
+ uint32_t width,
+ uint32_t height)
+{
+ struct virtio_gpu_resource_create_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->format = cpu_to_le32(format);
+ cmd_p->width = cpu_to_le32(width);
+ cmd_p->height = cpu_to_le32(height);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id)
+{
+ struct virtio_gpu_resource_unref *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id, uint32_t resource_id,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ struct virtio_gpu_set_scanout *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->scanout_id = cpu_to_le32(scanout_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct virtio_gpu_resource_flush *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id, uint64_t offset,
+ __le32 width, __le32 height,
+ __le32 x, __le32 y,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_transfer_to_host_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->r.width = width;
+ cmd_p->r.height = height;
+ cmd_p->r.x = x;
+ cmd_p->r.y = y;
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+static void
+virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_mem_entry *ents,
+ uint32_t nents,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_resource_attach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->nr_entries = cpu_to_le32(nents);
+
+ vbuf->data_buf = ents;
+ vbuf->data_size = sizeof(*ents) * nents;
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_resp_display_info *resp =
+ (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
+ int i;
+
+ spin_lock(&vgdev->display_info_lock);
+ for (i = 0; i < vgdev->num_scanouts; i++) {
+ vgdev->outputs[i].info = resp->pmodes[i];
+ if (resp->pmodes[i].enabled) {
+ DRM_DEBUG("output %d: %dx%d+%d+%d", i,
+ le32_to_cpu(resp->pmodes[i].r.width),
+ le32_to_cpu(resp->pmodes[i].r.height),
+ le32_to_cpu(resp->pmodes[i].r.x),
+ le32_to_cpu(resp->pmodes[i].r.y));
+ } else {
+ DRM_DEBUG("output %d: disabled", i);
+ }
+ }
+
+ vgdev->display_info_pending = false;
+ spin_unlock(&vgdev->display_info_lock);
+ wake_up(&vgdev->resp_wq);
+
+ if (!drm_helper_hpd_irq_event(vgdev->ddev))
+ drm_kms_helper_hotplug_event(vgdev->ddev);
+}
+
+static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_get_capset_info *cmd =
+ (struct virtio_gpu_get_capset_info *)vbuf->buf;
+ struct virtio_gpu_resp_capset_info *resp =
+ (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
+ int i = le32_to_cpu(cmd->capset_index);
+
+ spin_lock(&vgdev->display_info_lock);
+ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ spin_unlock(&vgdev->display_info_lock);
+ wake_up(&vgdev->resp_wq);
+}
+
+static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_get_capset *cmd =
+ (struct virtio_gpu_get_capset *)vbuf->buf;
+ struct virtio_gpu_resp_capset *resp =
+ (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
+ struct virtio_gpu_drv_cap_cache *cache_ent;
+
+ spin_lock(&vgdev->display_info_lock);
+ list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
+ if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
+ cache_ent->id == le32_to_cpu(cmd->capset_id)) {
+ memcpy(cache_ent->caps_cache, resp->capset_data,
+ cache_ent->size);
+ atomic_set(&cache_ent->is_valid, 1);
+ break;
+ }
+ }
+ spin_unlock(&vgdev->display_info_lock);
+ wake_up(&vgdev->resp_wq);
+}
+
+
+int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_ctrl_hdr *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ void *resp_buf;
+
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
+ GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
+ sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
+ resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ vgdev->display_info_pending = true;
+ cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
+{
+ struct virtio_gpu_get_capset_info *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ void *resp_buf;
+
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
+ GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
+ sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
+ resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
+ cmd_p->capset_index = cpu_to_le32(idx);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+ int idx, int version,
+ struct virtio_gpu_drv_cap_cache **cache_p)
+{
+ struct virtio_gpu_get_capset *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ int max_size = vgdev->capsets[idx].max_size;
+ struct virtio_gpu_drv_cap_cache *cache_ent;
+ void *resp_buf;
+
+	if (idx >= vgdev->num_capsets)
+ return -EINVAL;
+
+ if (version > vgdev->capsets[idx].max_version)
+ return -EINVAL;
+
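+	/*
+	 * Allocate the cache entry up front and add it to the cap_cache list;
+	 * virtio_gpu_cmd_capset_cb() copies the response into it and marks it
+	 * valid once the host answers.
+	 */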
+ cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
+ if (!cache_ent)
+ return -ENOMEM;
+
+ cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
+ if (!cache_ent->caps_cache) {
+ kfree(cache_ent);
+ return -ENOMEM;
+ }
+
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
+ GFP_KERNEL);
+ if (!resp_buf) {
+ kfree(cache_ent->caps_cache);
+ kfree(cache_ent);
+ return -ENOMEM;
+ }
+
+ cache_ent->version = version;
+ cache_ent->id = vgdev->capsets[idx].id;
+ atomic_set(&cache_ent->is_valid, 0);
+ cache_ent->size = max_size;
+ spin_lock(&vgdev->display_info_lock);
+ list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+ spin_unlock(&vgdev->display_info_lock);
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_capset) + max_size,
+ resp_buf);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
+ cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
+ cmd_p->capset_version = cpu_to_le32(version);
+ *cache_p = cache_ent;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
+}
+
+void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
+ uint32_t nlen, const char *name)
+{
+ struct virtio_gpu_ctx_create *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
+ cmd_p->hdr.ctx_id = cpu_to_le32(id);
+ cmd_p->nlen = cpu_to_le32(nlen);
+ strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
+ cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
+ uint32_t id)
+{
+ struct virtio_gpu_ctx_destroy *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
+ cmd_p->hdr.ctx_id = cpu_to_le32(id);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
+ uint32_t ctx_id,
+ uint32_t resource_id)
+{
+ struct virtio_gpu_ctx_resource *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
+ cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
+ uint32_t ctx_id,
+ uint32_t resource_id)
+{
+ struct virtio_gpu_ctx_resource *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
+ cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_resource_create_3d *rc_3d,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_resource_create_3d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ *cmd_p = *rc_3d;
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
+ cmd_p->hdr.flags = 0;
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id, uint32_t ctx_id,
+ uint64_t offset, uint32_t level,
+ struct virtio_gpu_box *box,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_transfer_host_3d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
+ cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->box = *box;
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->level = cpu_to_le32(level);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id, uint32_t ctx_id,
+ uint64_t offset, uint32_t level,
+ struct virtio_gpu_box *box,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_transfer_host_3d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
+ cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->box = *box;
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->level = cpu_to_le32(level);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+ void *data, uint32_t data_size,
+ uint32_t ctx_id, struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_cmd_submit *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ vbuf->data_buf = data;
+ vbuf->data_size = data_size;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
+ cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ cmd_p->size = cpu_to_le32(data_size);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ uint32_t resource_id,
+ struct virtio_gpu_fence **fence)
+{
+ struct virtio_gpu_mem_entry *ents;
+ struct scatterlist *sg;
+ int si;
+
+ if (!obj->pages) {
+ int ret;
+ ret = virtio_gpu_object_get_sg_table(vgdev, obj);
+ if (ret)
+ return ret;
+ }
+
+ /* gets freed when the ring has consumed it */
+ ents = kmalloc_array(obj->pages->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!ents) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
+ ents[si].addr = cpu_to_le64(sg_phys(sg));
+ ents[si].length = cpu_to_le32(sg->length);
+ ents[si].padding = 0;
+ }
+
+ virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+ ents, obj->pages->nents,
+ fence);
+ obj->hw_res_handle = resource_id;
+ return 0;
+}
+
+void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_output *output)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_update_cursor *cur_p;
+
+ output->cursor.pos.scanout_id = cpu_to_le32(output->index);
+ cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
+ memcpy(cur_p, &output->cursor, sizeof(output->cursor));
+ virtio_gpu_queue_cursor(vgdev, vbuf);
+}