path: root/qemu/include/hw/virtio/virtio-gpu.h
blob: 889676147aa33eeb022f4c57aef1741903ccb791 (plain)
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_VIRTIO_GPU_H
#define QEMU_VIRTIO_GPU_H

#include "qemu/queue.h"
#include "ui/qemu-pixman.h"
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "hw/pci/pci.h"

#include "standard-headers/linux/virtio_gpu.h"
#define TYPE_VIRTIO_GPU "virtio-gpu-device"
#define VIRTIO_GPU(obj)                                        \
        OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU)

#define VIRTIO_ID_GPU 16

#define VIRTIO_GPU_MAX_SCANOUT 4

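/*
 * Host-side state for one guest-created 2D resource: geometry and
 * format from RESOURCE_CREATE_2D, the guest backing pages as an iov
 * list from ATTACH_BACKING, the pixman image used for rendering, and
 * a bitmask of the scanouts currently displaying this resource.
 */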
struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

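/*
 * Per-output state: the QEMU console this scanout drives, its current
 * display surface and geometry, the resource being scanned out, and
 * the last cursor set by the guest.
 */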
struct virtio_gpu_scanout {
    QemuConsole *con;
    DisplaySurface *ds;
    uint32_t width, height;
    int x, y;
    int invalidate;
    uint32_t resource_id;
    QEMUCursor *current_cursor;
};

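/*
 * Output geometry requested from the UI side; reported back to the
 * guest through the display-info query.
 */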
struct virtio_gpu_requested_state {
    uint32_t width, height;
    int x, y;
};

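/* User-visible device properties (e.g. the max_outputs property). */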
struct virtio_gpu_conf {
    uint32_t max_outputs;
};

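/*
 * One in-flight control-queue request: the popped virtqueue element,
 * the parsed request header, and the response error code. Requests
 * whose completion is deferred are kept on the device's fenceq.
 */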
struct virtio_gpu_ctrl_command {
    VirtQueueElement elem;
    VirtQueue *vq;
    struct virtio_gpu_ctrl_hdr cmd_hdr;
    uint32_t error;
    bool finished;
    QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};

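/*
 * Main device state: the control and cursor virtqueues with their
 * bottom halves, the list of live resources, queued (fenced) commands,
 * per-scanout state, and bookkeeping/statistics.
 */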
typedef struct VirtIOGPU {
    VirtIODevice parent_obj;

    QEMUBH *ctrl_bh;
    QEMUBH *cursor_bh;
    VirtQueue *ctrl_vq;
    VirtQueue *cursor_vq;

    int enable;

    int config_size;
    DeviceState *qdev;

    QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;

    struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUT];
    struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUT];

    struct virtio_gpu_conf conf;
    int enabled_output_bitmask;
    struct virtio_gpu_config virtio_config;

    QEMUTimer *fence_poll;
    QEMUTimer *print_stats;

    struct {
        uint32_t inflight;
        uint32_t max_inflight;
        uint32_t requests;
        uint32_t req_3d;
        uint32_t bytes_3d;
    } stats;
} VirtIOGPU;

extern const GraphicHwOps virtio_gpu_ops;

/* properties shared between the PCI and VGA variants of the device */
#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state)               \
    DEFINE_PROP_BIT("ioeventfd", _state, flags,                \
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
    DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)

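/*
 * Copy the fixed-size request structure from the current command's
 * out-iovecs into 'out'. Relies on a variable named 'cmd' being in
 * scope; on a short read it logs a guest error and returns from the
 * calling function.
 */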
#define VIRTIO_GPU_FILL_CMD(out) do {                                   \
        size_t s;                                                       \
        s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0,          \
                       &out, sizeof(out));                              \
        if (s != sizeof(out)) {                                         \
            qemu_log_mask(LOG_GUEST_ERROR,                              \
                          "%s: command size incorrect %zu vs %zu\n",    \
                          __func__, s, sizeof(out));                    \
            return;                                                     \
        }                                                               \
    } while (0)
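
/*
 * Typical handler usage, as a sketch (the concrete handlers live in
 * virtio-gpu.c):
 *
 *     struct virtio_gpu_resource_create_2d c2d;
 *
 *     VIRTIO_GPU_FILL_CMD(c2d);
 *     ... validate c2d, then create and register the resource ...
 */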

/* virtio-gpu.c */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len);
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type);
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd);
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov);
void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count);

#endif /* QEMU_VIRTIO_GPU_H */
lass="p">(mddev, sector); maxsectors = dev0->end_sector - sector; subq = bdev_get_queue(dev0->rdev->bdev); if (subq->merge_bvec_fn) { bvm->bi_bdev = dev0->rdev->bdev; bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors; maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm, biovec)); } if (maxsectors < bio_sectors) maxsectors = 0; else maxsectors -= bio_sectors; if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0) return maxbytes; if (maxsectors > (maxbytes >> 9)) return maxbytes; else return maxsectors << 9; } static int linear_congested(struct mddev *mddev, int bits) { struct linear_conf *conf; int i, ret = 0; conf = mddev->private; for (i = 0; i < mddev->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); ret |= bdi_congested(&q->backing_dev_info, bits); } return ret; } static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) { struct linear_conf *conf; sector_t array_sectors; conf = mddev->private; WARN_ONCE(sectors || raid_disks, "%s does not support generic reshape\n", __func__); array_sectors = conf->array_sectors; return array_sectors; } static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) { struct linear_conf *conf; struct md_rdev *rdev; int i, cnt; bool discard_supported = false; conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info), GFP_KERNEL); if (!conf) return NULL; cnt = 0; conf->array_sectors = 0; rdev_for_each(rdev, mddev) { int j = rdev->raid_disk; struct dev_info *disk = conf->disks + j; sector_t sectors; if (j < 0 || j >= raid_disks || disk->rdev) { printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n", mdname(mddev)); goto out; } disk->rdev = rdev; if (mddev->chunk_sectors) { sectors = rdev->sectors; sector_div(sectors, mddev->chunk_sectors); rdev->sectors = sectors * mddev->chunk_sectors; } disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); conf->array_sectors += rdev->sectors; cnt++; if (blk_queue_discard(bdev_get_queue(rdev->bdev))) discard_supported = true; } if (cnt != raid_disks) { printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", mdname(mddev)); goto out; } if (!discard_supported) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); else queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); /* * Here we calculate the device offsets. */ conf->disks[0].end_sector = conf->disks[0].rdev->sectors; for (i = 1; i < raid_disks; i++) conf->disks[i].end_sector = conf->disks[i-1].end_sector + conf->disks[i].rdev->sectors; return conf; out: kfree(conf); return NULL; } static int linear_run (struct mddev *mddev) { struct linear_conf *conf; int ret; if (md_check_no_bitmap(mddev)) return -EINVAL; conf = linear_conf(mddev, mddev->raid_disks); if (!conf) return 1; mddev->private = conf; md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); ret = md_integrity_register(mddev); if (ret) { kfree(conf); mddev->private = NULL; } return ret; } static int linear_add(struct mddev *mddev, struct md_rdev *rdev) { /* Adding a drive to a linear array allows the array to grow. * It is permitted if the new drive has a matching superblock * already on it, with raid_disk equal to raid_disks. * It is achieved by creating a new linear_private_data structure * and swapping it in in-place of the current one. * The current one is never freed until the array is stopped. * This avoids races. 
*/ struct linear_conf *newconf, *oldconf; if (rdev->saved_raid_disk != mddev->raid_disks) return -EINVAL; rdev->raid_disk = rdev->saved_raid_disk; rdev->saved_raid_disk = -1; newconf = linear_conf(mddev,mddev->raid_disks+1); if (!newconf) return -ENOMEM; mddev_suspend(mddev); oldconf = mddev->private; mddev->raid_disks++; mddev->private = newconf; md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); set_capacity(mddev->gendisk, mddev->array_sectors); mddev_resume(mddev); revalidate_disk(mddev->gendisk); kfree(oldconf); return 0; } static void linear_free(struct mddev *mddev, void *priv) { struct linear_conf *conf = priv; kfree(conf); } static void linear_make_request(struct mddev *mddev, struct bio *bio) { char b[BDEVNAME_SIZE]; struct dev_info *tmp_dev; struct bio *split; sector_t start_sector, end_sector, data_offset; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); return; } do { tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; end_sector = tmp_dev->end_sector; data_offset = tmp_dev->rdev->data_offset; bio->bi_bdev = tmp_dev->rdev->bdev; if (unlikely(bio->bi_iter.bi_sector >= end_sector || bio->bi_iter.bi_sector < start_sector)) goto out_of_bounds; if (unlikely(bio_end_sector(bio) > end_sector)) { /* This bio crosses a device boundary, so we have to * split it. */ split = bio_split(bio, end_sector - bio->bi_iter.bi_sector, GFP_NOIO, fs_bio_set); bio_chain(split, bio); } else { split = bio; } split->bi_iter.bi_sector = split->bi_iter.bi_sector - start_sector + data_offset; if (unlikely((split->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ bio_endio(split, 0); } else generic_make_request(split); } while (split != bio); return; out_of_bounds: printk(KERN_ERR "md/linear:%s: make_request: Sector %llu out of bounds on " "dev %s: %llu sectors, offset %llu\n", mdname(mddev), (unsigned long long)bio->bi_iter.bi_sector, bdevname(tmp_dev->rdev->bdev, b), (unsigned long long)tmp_dev->rdev->sectors, (unsigned long long)start_sector); bio_io_error(bio); } static void linear_status (struct seq_file *seq, struct mddev *mddev) { seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); } static void linear_quiesce(struct mddev *mddev, int state) { } static struct md_personality linear_personality = { .name = "linear", .level = LEVEL_LINEAR, .owner = THIS_MODULE, .make_request = linear_make_request, .run = linear_run, .free = linear_free, .status = linear_status, .hot_add_disk = linear_add, .size = linear_size, .quiesce = linear_quiesce, .congested = linear_congested, .mergeable_bvec = linear_mergeable_bvec, }; static int __init linear_init (void) { return register_md_personality (&linear_personality); } static void linear_exit (void) { unregister_md_personality (&linear_personality); } module_init(linear_init); module_exit(linear_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Linear device concatenation personality for MD"); MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ MODULE_ALIAS("md-linear"); MODULE_ALIAS("md-level--1");