author     Don Dugger <n0ano@n0ano.com>                2016-06-03 03:33:22 +0000
committer  Gerrit Code Review <gerrit@172.30.200.206>  2016-06-03 03:33:23 +0000
commit     da27230f80795d0028333713f036d44c53cb0e68 (patch)
tree       b3d379eaf000adf72b36cb01cdf4d79c3e3f064c /qemu/contrib
parent     0e68cb048bb8aadb14675f5d4286d8ab2fc35449 (diff)
parent     437fd90c0250dee670290f9b714253671a990160 (diff)
Merge "These changes are the raw update to qemu-2.6."
Diffstat (limited to 'qemu/contrib')
-rw-r--r--  qemu/contrib/ivshmem-client/Makefile.objs    |   1
-rw-r--r--  qemu/contrib/ivshmem-client/ivshmem-client.c | 446
-rw-r--r--  qemu/contrib/ivshmem-client/ivshmem-client.h | 212
-rw-r--r--  qemu/contrib/ivshmem-client/main.c           | 241
-rw-r--r--  qemu/contrib/ivshmem-server/Makefile.objs    |   1
-rw-r--r--  qemu/contrib/ivshmem-server/ivshmem-server.c | 457
-rw-r--r--  qemu/contrib/ivshmem-server/ivshmem-server.h | 166
-rw-r--r--  qemu/contrib/ivshmem-server/main.c           | 273
8 files changed, 1797 insertions, 0 deletions
diff --git a/qemu/contrib/ivshmem-client/Makefile.objs b/qemu/contrib/ivshmem-client/Makefile.objs
new file mode 100644
index 000000000..bfab2d20d
--- /dev/null
+++ b/qemu/contrib/ivshmem-client/Makefile.objs
@@ -0,0 +1 @@
+ivshmem-client-obj-y = ivshmem-client.o main.o
diff --git a/qemu/contrib/ivshmem-client/ivshmem-client.c b/qemu/contrib/ivshmem-client/ivshmem-client.c
new file mode 100644
index 000000000..44ae3646e
--- /dev/null
+++ b/qemu/contrib/ivshmem-client/ivshmem-client.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "qemu-common.h"
+#include "qemu/queue.h"
+
+#include "ivshmem-client.h"
+
+/* log a message on stdout if verbose=1 */
+#define IVSHMEM_CLIENT_DEBUG(client, fmt, ...) do { \
+ if ((client)->verbose) { \
+ printf(fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/* read message from the unix socket */
+static int
+ivshmem_client_read_one_msg(IvshmemClient *client, int64_t *index, int *fd)
+{
+ int ret;
+ struct msghdr msg;
+ struct iovec iov[1];
+ union {
+ struct cmsghdr cmsg;
+ char control[CMSG_SPACE(sizeof(int))];
+ } msg_control;
+ struct cmsghdr *cmsg;
+
+ iov[0].iov_base = index;
+ iov[0].iov_len = sizeof(*index);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &msg_control;
+ msg.msg_controllen = sizeof(msg_control);
+
+ ret = recvmsg(client->sock_fd, &msg, 0);
+ if (ret == 0) {
+ IVSHMEM_CLIENT_DEBUG(client, "lost connection to server\n");
+ return -1;
+ }
+ if (ret < (int)sizeof(*index)) {
+ IVSHMEM_CLIENT_DEBUG(client, "cannot read message: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ *index = GINT64_FROM_LE(*index);
+ *fd = -1;
+
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
+ cmsg->cmsg_level != SOL_SOCKET ||
+ cmsg->cmsg_type != SCM_RIGHTS) {
+ continue;
+ }
+
+ memcpy(fd, CMSG_DATA(cmsg), sizeof(*fd));
+ }
+
+ return 0;
+}
+
+/* free a peer when the server advertises a disconnection or when the
+ * client is freed */
+static void
+ivshmem_client_free_peer(IvshmemClient *client, IvshmemClientPeer *peer)
+{
+ unsigned vector;
+
+ QTAILQ_REMOVE(&client->peer_list, peer, next);
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ close(peer->vectors[vector]);
+ }
+
+ g_free(peer);
+}
+
+/* handle message coming from server (new peer, new vectors) */
+static int
+ivshmem_client_handle_server_msg(IvshmemClient *client)
+{
+ IvshmemClientPeer *peer;
+ int64_t peer_id;
+ int ret, fd;
+
+ ret = ivshmem_client_read_one_msg(client, &peer_id, &fd);
+ if (ret < 0) {
+ return -1;
+ }
+
+ /* can return a peer or the local client */
+ peer = ivshmem_client_search_peer(client, peer_id);
+
+ /* delete peer */
+ if (fd == -1) {
+
+ if (peer == NULL || peer == &client->local) {
+ IVSHMEM_CLIENT_DEBUG(client, "receive delete for invalid "
+ "peer %" PRId64 "\n", peer_id);
+ return -1;
+ }
+
+ IVSHMEM_CLIENT_DEBUG(client, "delete peer id = %" PRId64 "\n", peer_id);
+ ivshmem_client_free_peer(client, peer);
+ return 0;
+ }
+
+ /* new peer */
+ if (peer == NULL) {
+ peer = g_malloc0(sizeof(*peer));
+ peer->id = peer_id;
+ peer->vectors_count = 0;
+ QTAILQ_INSERT_TAIL(&client->peer_list, peer, next);
+ IVSHMEM_CLIENT_DEBUG(client, "new peer id = %" PRId64 "\n", peer_id);
+ }
+
+ /* new vector */
+ IVSHMEM_CLIENT_DEBUG(client, " new vector %d (fd=%d) for peer id %"
+ PRId64 "\n", peer->vectors_count, fd, peer->id);
+ if (peer->vectors_count >= G_N_ELEMENTS(peer->vectors)) {
+ IVSHMEM_CLIENT_DEBUG(client, "Too many vectors received, failing");
+ return -1;
+ }
+
+ peer->vectors[peer->vectors_count] = fd;
+ peer->vectors_count++;
+
+ return 0;
+}
+
+/* init a new ivshmem client */
+int
+ivshmem_client_init(IvshmemClient *client, const char *unix_sock_path,
+ IvshmemClientNotifCb notif_cb, void *notif_arg,
+ bool verbose)
+{
+ int ret;
+ unsigned i;
+
+ memset(client, 0, sizeof(*client));
+
+ ret = snprintf(client->unix_sock_path, sizeof(client->unix_sock_path),
+ "%s", unix_sock_path);
+
+ if (ret < 0 || ret >= sizeof(client->unix_sock_path)) {
+ IVSHMEM_CLIENT_DEBUG(client, "could not copy unix socket path\n");
+ return -1;
+ }
+
+ for (i = 0; i < IVSHMEM_CLIENT_MAX_VECTORS; i++) {
+ client->local.vectors[i] = -1;
+ }
+
+ QTAILQ_INIT(&client->peer_list);
+ client->local.id = -1;
+
+ client->notif_cb = notif_cb;
+ client->notif_arg = notif_arg;
+ client->verbose = verbose;
+ client->shm_fd = -1;
+ client->sock_fd = -1;
+
+ return 0;
+}
+
+/* create and connect to the unix socket */
+int
+ivshmem_client_connect(IvshmemClient *client)
+{
+ struct sockaddr_un sun;
+ int fd, ret;
+ int64_t tmp;
+
+ IVSHMEM_CLIENT_DEBUG(client, "connect to client %s\n",
+ client->unix_sock_path);
+
+ client->sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (client->sock_fd < 0) {
+ IVSHMEM_CLIENT_DEBUG(client, "cannot create socket: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ sun.sun_family = AF_UNIX;
+ ret = snprintf(sun.sun_path, sizeof(sun.sun_path), "%s",
+ client->unix_sock_path);
+ if (ret < 0 || ret >= sizeof(sun.sun_path)) {
+ IVSHMEM_CLIENT_DEBUG(client, "could not copy unix socket path\n");
+ goto err_close;
+ }
+
+ if (connect(client->sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
+ IVSHMEM_CLIENT_DEBUG(client, "cannot connect to %s: %s\n", sun.sun_path,
+ strerror(errno));
+ goto err_close;
+ }
+
+ /* first, we expect a protocol version */
+ if (ivshmem_client_read_one_msg(client, &tmp, &fd) < 0 ||
+ (tmp != IVSHMEM_PROTOCOL_VERSION) || fd != -1) {
+ IVSHMEM_CLIENT_DEBUG(client, "cannot read from server\n");
+ goto err_close;
+ }
+
+ /* then, we expect our index + a fd == -1 */
+ if (ivshmem_client_read_one_msg(client, &client->local.id, &fd) < 0 ||
+ client->local.id < 0 || fd != -1) {
+ IVSHMEM_CLIENT_DEBUG(client, "cannot read from server (2)\n");
+ goto err_close;
+ }
+ IVSHMEM_CLIENT_DEBUG(client, "our_id=%" PRId64 "\n", client->local.id);
+
+ /* now, we expect shared mem fd + a -1 index, note that shm fd
+ * is not used */
+ if (ivshmem_client_read_one_msg(client, &tmp, &fd) < 0 ||
+ tmp != -1 || fd < 0) {
+ if (fd >= 0) {
+ close(fd);
+ }
+ IVSHMEM_CLIENT_DEBUG(client, "cannot read from server (3)\n");
+ goto err_close;
+ }
+ client->shm_fd = fd;
+ IVSHMEM_CLIENT_DEBUG(client, "shm_fd=%d\n", fd);
+
+ return 0;
+
+err_close:
+ close(client->sock_fd);
+ client->sock_fd = -1;
+ return -1;
+}
+
+/* close connection to the server, and free all peer structures */
+void
+ivshmem_client_close(IvshmemClient *client)
+{
+ IvshmemClientPeer *peer;
+ unsigned i;
+
+ IVSHMEM_CLIENT_DEBUG(client, "close client\n");
+
+ while ((peer = QTAILQ_FIRST(&client->peer_list)) != NULL) {
+ ivshmem_client_free_peer(client, peer);
+ }
+
+ close(client->shm_fd);
+ client->shm_fd = -1;
+ close(client->sock_fd);
+ client->sock_fd = -1;
+ client->local.id = -1;
+ for (i = 0; i < IVSHMEM_CLIENT_MAX_VECTORS; i++) {
+ close(client->local.vectors[i]);
+ client->local.vectors[i] = -1;
+ }
+ client->local.vectors_count = 0;
+}
+
+/* get the fd_set according to the unix socket and peer list */
+void
+ivshmem_client_get_fds(const IvshmemClient *client, fd_set *fds, int *maxfd)
+{
+ int fd;
+ unsigned vector;
+
+ FD_SET(client->sock_fd, fds);
+ if (client->sock_fd >= *maxfd) {
+ *maxfd = client->sock_fd + 1;
+ }
+
+ for (vector = 0; vector < client->local.vectors_count; vector++) {
+ fd = client->local.vectors[vector];
+ FD_SET(fd, fds);
+ if (fd >= *maxfd) {
+ *maxfd = fd + 1;
+ }
+ }
+}
+
+/* handle events from eventfd: just print a message on notification */
+static int
+ivshmem_client_handle_event(IvshmemClient *client, const fd_set *cur, int maxfd)
+{
+ IvshmemClientPeer *peer;
+ uint64_t kick;
+ unsigned i;
+ int ret;
+
+ peer = &client->local;
+
+ for (i = 0; i < peer->vectors_count; i++) {
+ if (peer->vectors[i] >= maxfd || !FD_ISSET(peer->vectors[i], cur)) {
+ continue;
+ }
+
+ ret = read(peer->vectors[i], &kick, sizeof(kick));
+ if (ret < 0) {
+ return ret;
+ }
+ if (ret != sizeof(kick)) {
+ IVSHMEM_CLIENT_DEBUG(client, "invalid read size = %d\n", ret);
+ errno = EINVAL;
+ return -1;
+ }
+ IVSHMEM_CLIENT_DEBUG(client, "received event on fd %d vector %d: %"
+ PRIu64 "\n", peer->vectors[i], i, kick);
+ if (client->notif_cb != NULL) {
+ client->notif_cb(client, peer, i, client->notif_arg);
+ }
+ }
+
+ return 0;
+}
+
+/* read and handle new messages on the given fd_set */
+int
+ivshmem_client_handle_fds(IvshmemClient *client, fd_set *fds, int maxfd)
+{
+ if (client->sock_fd < maxfd && FD_ISSET(client->sock_fd, fds) &&
+ ivshmem_client_handle_server_msg(client) < 0 && errno != EINTR) {
+ IVSHMEM_CLIENT_DEBUG(client, "ivshmem_client_handle_server_msg() "
+ "failed\n");
+ return -1;
+ } else if (ivshmem_client_handle_event(client, fds, maxfd) < 0 &&
+ errno != EINTR) {
+ IVSHMEM_CLIENT_DEBUG(client, "ivshmem_client_handle_event() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* send a notification on a vector of a peer */
+int
+ivshmem_client_notify(const IvshmemClient *client,
+ const IvshmemClientPeer *peer, unsigned vector)
+{
+ uint64_t kick;
+ int fd;
+
+ if (vector >= peer->vectors_count) {
+ IVSHMEM_CLIENT_DEBUG(client, "invalid vector %u on peer %" PRId64 "\n",
+ vector, peer->id);
+ return -1;
+ }
+ fd = peer->vectors[vector];
+ IVSHMEM_CLIENT_DEBUG(client, "notify peer %" PRId64
+ " on vector %d, fd %d\n", peer->id, vector, fd);
+
+ kick = 1;
+ if (write(fd, &kick, sizeof(kick)) != sizeof(kick)) {
+ fprintf(stderr, "could not write to %d: %s\n", peer->vectors[vector],
+ strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+/* send a notification to all vectors of a peer */
+int
+ivshmem_client_notify_all_vects(const IvshmemClient *client,
+ const IvshmemClientPeer *peer)
+{
+ unsigned vector;
+ int ret = 0;
+
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ if (ivshmem_client_notify(client, peer, vector) < 0) {
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+/* send a notification to all peers */
+int
+ivshmem_client_notify_broadcast(const IvshmemClient *client)
+{
+ IvshmemClientPeer *peer;
+ int ret = 0;
+
+ QTAILQ_FOREACH(peer, &client->peer_list, next) {
+ if (ivshmem_client_notify_all_vects(client, peer) < 0) {
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+/* lookup peer from its id */
+IvshmemClientPeer *
+ivshmem_client_search_peer(IvshmemClient *client, int64_t peer_id)
+{
+ IvshmemClientPeer *peer;
+
+ if (peer_id == client->local.id) {
+ return &client->local;
+ }
+
+ QTAILQ_FOREACH(peer, &client->peer_list, next) {
+ if (peer->id == peer_id) {
+ return peer;
+ }
+ }
+ return NULL;
+}
+
+/* dump our info and the list of peers and their vectors on stdout */
+void
+ivshmem_client_dump(const IvshmemClient *client)
+{
+ const IvshmemClientPeer *peer;
+ unsigned vector;
+
+ /* dump local info */
+ peer = &client->local;
+ printf("our_id = %" PRId64 "\n", peer->id);
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ printf(" vector %d is enabled (fd=%d)\n", vector,
+ peer->vectors[vector]);
+ }
+
+ /* dump peers */
+ QTAILQ_FOREACH(peer, &client->peer_list, next) {
+ printf("peer_id = %" PRId64 "\n", peer->id);
+
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ printf(" vector %d is enabled (fd=%d)\n", vector,
+ peer->vectors[vector]);
+ }
+ }
+}
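
For orientation, here is a minimal sketch of how the API above is meant to be embedded in a host application, mirroring the select() loop in main.c further below. The callback name on_notif and the trimmed error handling are illustrative assumptions, not part of the patch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <sys/select.h>
    #include "ivshmem-client.h"

    /* print a line whenever a peer kicks one of our vectors */
    static void on_notif(const IvshmemClient *c, const IvshmemClientPeer *p,
                         unsigned vect, void *arg)
    {
        printf("peer %" PRId64 " kicked vector %u\n", p->id, vect);
    }

    int main(void)
    {
        IvshmemClient client;
        fd_set fds;
        int maxfd;

        if (ivshmem_client_init(&client, "/tmp/ivshmem_socket",
                                on_notif, NULL, true) < 0 ||
            ivshmem_client_connect(&client) < 0) {
            return 1;
        }
        for (;;) {
            FD_ZERO(&fds);
            maxfd = 0;
            ivshmem_client_get_fds(&client, &fds, &maxfd);
            if (select(maxfd, &fds, NULL, NULL, NULL) < 0 ||
                ivshmem_client_handle_fds(&client, &fds, maxfd) < 0) {
                break;
            }
        }
        ivshmem_client_close(&client);
        return 0;
    }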
diff --git a/qemu/contrib/ivshmem-client/ivshmem-client.h b/qemu/contrib/ivshmem-client/ivshmem-client.h
new file mode 100644
index 000000000..54cde17d9
--- /dev/null
+++ b/qemu/contrib/ivshmem-client/ivshmem-client.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef _IVSHMEM_CLIENT_H_
+#define _IVSHMEM_CLIENT_H_
+
+/**
+ * This file provides helpers to implement an ivshmem client. It is used
+ * on the host to ask QEMU to send an interrupt to an ivshmem PCI device in a
+ * guest. QEMU also implements an ivshmem client similar to this one; both
+ * connect to an ivshmem server.
+ *
+ * A standalone ivshmem client based on this file is provided for debug/test
+ * purposes.
+ */
+
+#include <sys/select.h>
+
+#include "qemu/queue.h"
+#include "hw/misc/ivshmem.h"
+
+/**
+ * Maximum number of notification vectors supported by the client
+ */
+#define IVSHMEM_CLIENT_MAX_VECTORS 64
+
+/**
+ * Structure storing a peer
+ *
+ * Each time a client connects to an ivshmem server, it is advertised to
+ * all connected clients through the unix socket. When our ivshmem
+ * client receives a notification, it creates an IvshmemClientPeer
+ * structure to store this peer's information.
+ *
+ * This structure is also used to store the information of our own
+ * client in (IvshmemClient)->local.
+ */
+typedef struct IvshmemClientPeer {
+ QTAILQ_ENTRY(IvshmemClientPeer) next; /**< next in list */
+ int64_t id; /**< the id of the peer */
+ int vectors[IVSHMEM_CLIENT_MAX_VECTORS]; /**< one fd per vector */
+ unsigned vectors_count; /**< number of vectors */
+} IvshmemClientPeer;
+QTAILQ_HEAD(IvshmemClientPeerList, IvshmemClientPeer);
+
+typedef struct IvshmemClientPeerList IvshmemClientPeerList;
+typedef struct IvshmemClient IvshmemClient;
+
+/**
+ * Typedef of callback function used when our IvshmemClient receives a
+ * notification from a peer.
+ */
+typedef void (*IvshmemClientNotifCb)(
+ const IvshmemClient *client,
+ const IvshmemClientPeer *peer,
+ unsigned vect, void *arg);
+
+/**
+ * Structure describing an ivshmem client
+ *
+ * This structure stores all information related to our client: the name
+ * of the server unix socket, the list of peers advertised by the
+ * server, our own client information, and a pointer to the notification
+ * callback function used when we receive a notification from a peer.
+ */
+struct IvshmemClient {
+ char unix_sock_path[PATH_MAX]; /**< path to unix sock */
+ int sock_fd; /**< unix sock filedesc */
+ int shm_fd; /**< shm file descriptor */
+
+ IvshmemClientPeerList peer_list; /**< list of peers */
+ IvshmemClientPeer local; /**< our own infos */
+
+ IvshmemClientNotifCb notif_cb; /**< notification callback */
+ void *notif_arg; /**< notification argument */
+
+ bool verbose; /**< true to enable debug */
+};
+
+/**
+ * Initialize an ivshmem client
+ *
+ * @client: A pointer to an uninitialized IvshmemClient structure
+ * @unix_sock_path: The pointer to the unix socket file name
+ * @notif_cb: If not NULL, the pointer to the function to be called when
+ * our IvshmemClient receives a notification from a peer
+ * @notif_arg: Opaque pointer given as-is to the notification callback
+ * function
+ * @verbose: True to enable debug
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_client_init(IvshmemClient *client, const char *unix_sock_path,
+ IvshmemClientNotifCb notif_cb, void *notif_arg,
+ bool verbose);
+
+/**
+ * Connect to the server
+ *
+ * Connect to the server unix socket, and read the first initial
+ * messages sent by the server, giving the ID of the client and the file
+ * descriptor of the shared memory.
+ *
+ * @client: The ivshmem client
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_client_connect(IvshmemClient *client);
+
+/**
+ * Close connection to the server and free all peer structures
+ *
+ * @client: The ivshmem client
+ */
+void ivshmem_client_close(IvshmemClient *client);
+
+/**
+ * Fill a fd_set with file descriptors to be monitored
+ *
+ * This function will fill a fd_set with all file descriptors
+ * that must be polled (unix server socket and peer eventfds). The
+ * function will not initialize the fd_set; it is up to the caller
+ * to do this.
+ *
+ * @client: The ivshmem client
+ * @fds: The fd_set to be updated
+ * @maxfd: Must be set to the max file descriptor + 1 in fd_set. This value is
+ * updated if this function adds a greater fd in fd_set.
+ */
+void ivshmem_client_get_fds(const IvshmemClient *client, fd_set *fds,
+ int *maxfd);
+
+/**
+ * Read and handle new messages
+ *
+ * Given a fd_set filled by select(), handle incoming messages from
+ * server or peers.
+ *
+ * @client: The ivshmem client
+ * @fds: The fd_set containing the file descriptors to be checked. Note
+ * that file descriptors that are not related to our client are
+ * ignored.
+ * @maxfd: The maximum fd in fd_set, plus one.
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_client_handle_fds(IvshmemClient *client, fd_set *fds, int maxfd);
+
+/**
+ * Send a notification to a vector of a peer
+ *
+ * @client: The ivshmem client
+ * @peer: The peer to be notified
+ * @vector: The number of the vector
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_client_notify(const IvshmemClient *client,
+ const IvshmemClientPeer *peer, unsigned vector);
+
+/**
+ * Send a notification to all vectors of a peer
+ *
+ * @client: The ivshmem client
+ * @peer: The peer to be notified
+ *
+ * Returns: 0 on success, or a negative value on error (at least one
+ * notification failed)
+ */
+int ivshmem_client_notify_all_vects(const IvshmemClient *client,
+ const IvshmemClientPeer *peer);
+
+/**
+ * Broadcast a notification to all vectors of all peers
+ *
+ * @client: The ivshmem client
+ *
+ * Returns: 0 on success, or a negative value on error (at least one
+ * notification failed)
+ */
+int ivshmem_client_notify_broadcast(const IvshmemClient *client);
+
+/**
+ * Search for a peer by its identifier
+ *
+ * Return the peer structure matching peer_id. If the given peer_id is
+ * the local id, the function returns the local peer structure.
+ *
+ * @client: The ivshmem client
+ * @peer_id: The identifier of the peer structure
+ *
+ * Returns: The peer structure, or NULL if not found
+ */
+IvshmemClientPeer *
+ivshmem_client_search_peer(IvshmemClient *client, int64_t peer_id);
+
+/**
+ * Dump information of this ivshmem client on stdout
+ *
+ * Dump the id and the vectors of the given ivshmem client and the list
+ * of its peers and their vectors on stdout.
+ *
+ * @client: The ivshmem client
+ */
+void ivshmem_client_dump(const IvshmemClient *client);
+
+#endif /* _IVSHMEM_CLIENT_H_ */
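
One thing the header leaves implicit: the client->shm_fd received during the handshake is the shared memory itself, and a consumer maps it with mmap(). A minimal sketch, assuming a fixed 4 MiB size; the protocol does not carry the size, so peers must agree on it out of band (e.g. via the server's -l option):

    #include <stdio.h>
    #include <sys/mman.h>
    #include "ivshmem-client.h"

    #define SHM_SIZE (4 * 1024 * 1024) /* assumption: agreed out of band */

    /* map the shared memory fd obtained by ivshmem_client_connect() */
    static void *ivshmem_map(const IvshmemClient *client)
    {
        void *p = mmap(NULL, SHM_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, client->shm_fd, 0);
        return p == MAP_FAILED ? NULL : p;
    }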
diff --git a/qemu/contrib/ivshmem-client/main.c b/qemu/contrib/ivshmem-client/main.c
new file mode 100644
index 000000000..33ae1daa1
--- /dev/null
+++ b/qemu/contrib/ivshmem-client/main.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+
+#include "ivshmem-client.h"
+
+#define IVSHMEM_CLIENT_DEFAULT_VERBOSE 0
+#define IVSHMEM_CLIENT_DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
+
+typedef struct IvshmemClientArgs {
+ bool verbose;
+ const char *unix_sock_path;
+} IvshmemClientArgs;
+
+/* show ivshmem_client_usage and exit with given error code */
+static void
+ivshmem_client_usage(const char *name, int code)
+{
+ fprintf(stderr, "%s [opts]\n", name);
+ fprintf(stderr, " -h: show this help\n");
+ fprintf(stderr, " -v: verbose mode\n");
+ fprintf(stderr, " -S <unix_sock_path>: path to the unix socket\n"
+ " to connect to.\n"
+ " default=%s\n", IVSHMEM_CLIENT_DEFAULT_UNIX_SOCK_PATH);
+ exit(code);
+}
+
+/* parse the program arguments, exit on error */
+static void
+ivshmem_client_parse_args(IvshmemClientArgs *args, int argc, char *argv[])
+{
+ int c;
+
+ while ((c = getopt(argc, argv,
+ "h" /* help */
+ "v" /* verbose */
+ "S:" /* unix_sock_path */
+ )) != -1) {
+
+ switch (c) {
+ case 'h': /* help */
+ ivshmem_client_usage(argv[0], 0);
+ break;
+
+ case 'v': /* verbose */
+ args->verbose = 1;
+ break;
+
+ case 'S': /* unix_sock_path */
+ args->unix_sock_path = optarg;
+ break;
+
+ default:
+ ivshmem_client_usage(argv[0], 1);
+ break;
+ }
+ }
+}
+
+/* show command line help */
+static void
+ivshmem_client_cmdline_help(void)
+{
+ printf("dump: dump peers (including us)\n"
+ "int <peer> <vector>: notify one vector on a peer\n"
+ "int <peer> all: notify all vectors of a peer\n"
+ "int all: notify all vectors of all peers (excepting us)\n");
+}
+
+/* read stdin and handle commands */
+static int
+ivshmem_client_handle_stdin_command(IvshmemClient *client)
+{
+ IvshmemClientPeer *peer;
+ char buf[128];
+ char *s, *token;
+ int ret;
+ int peer_id, vector;
+
+ memset(buf, 0, sizeof(buf));
+ ret = read(0, buf, sizeof(buf) - 1);
+ if (ret < 0) {
+ return -1;
+ }
+
+ s = buf;
+ while ((token = strsep(&s, "\n\r;")) != NULL) {
+ if (!strcmp(token, "")) {
+ continue;
+ }
+ if (!strcmp(token, "?")) {
+ ivshmem_client_cmdline_help();
+ }
+ if (!strcmp(token, "help")) {
+ ivshmem_client_cmdline_help();
+ } else if (!strcmp(token, "dump")) {
+ ivshmem_client_dump(client);
+ } else if (!strcmp(token, "int all")) {
+ ivshmem_client_notify_broadcast(client);
+ } else if (sscanf(token, "int %d %d", &peer_id, &vector) == 2) {
+ peer = ivshmem_client_search_peer(client, peer_id);
+ if (peer == NULL) {
+ printf("cannot find peer_id = %d\n", peer_id);
+ continue;
+ }
+ ivshmem_client_notify(client, peer, vector);
+ } else if (sscanf(token, "int %d all", &peer_id) == 1) {
+ peer = ivshmem_client_search_peer(client, peer_id);
+ if (peer == NULL) {
+ printf("cannot find peer_id = %d\n", peer_id);
+ continue;
+ }
+ ivshmem_client_notify_all_vects(client, peer);
+ } else {
+ printf("invalid command, type help\n");
+ }
+ }
+
+ printf("cmd> ");
+ fflush(stdout);
+ return 0;
+}
+
+/* listen on stdin (command line), on unix socket (notifications of new
+ * and dead peers), and on eventfd (IRQ request) */
+static int
+ivshmem_client_poll_events(IvshmemClient *client)
+{
+ fd_set fds;
+ int ret, maxfd;
+
+ while (1) {
+
+ FD_ZERO(&fds);
+ FD_SET(0, &fds); /* add stdin in fd_set */
+ maxfd = 1;
+
+ ivshmem_client_get_fds(client, &fds, &maxfd);
+
+ ret = select(maxfd, &fds, NULL, NULL, NULL);
+ if (ret < 0) {
+ if (errno == EINTR) {
+ continue;
+ }
+
+ fprintf(stderr, "select error: %s\n", strerror(errno));
+ break;
+ }
+ if (ret == 0) {
+ continue;
+ }
+
+ if (FD_ISSET(0, &fds) &&
+ ivshmem_client_handle_stdin_command(client) < 0 && errno != EINTR) {
+ fprintf(stderr, "ivshmem_client_handle_stdin_command() failed\n");
+ break;
+ }
+
+ if (ivshmem_client_handle_fds(client, &fds, maxfd) < 0) {
+ fprintf(stderr, "ivshmem_client_handle_fds() failed\n");
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* callback when we receive a notification (just display it) */
+static void
+ivshmem_client_notification_cb(const IvshmemClient *client,
+ const IvshmemClientPeer *peer,
+ unsigned vect, void *arg)
+{
+ (void)client;
+ (void)arg;
+ printf("receive notification from peer_id=%" PRId64 " vector=%u\n",
+ peer->id, vect);
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct sigaction sa;
+ IvshmemClient client;
+ IvshmemClientArgs args = {
+ .verbose = IVSHMEM_CLIENT_DEFAULT_VERBOSE,
+ .unix_sock_path = IVSHMEM_CLIENT_DEFAULT_UNIX_SOCK_PATH,
+ };
+
+ /* parse arguments, will exit on error */
+ ivshmem_client_parse_args(&args, argc, argv);
+
+ /* Ignore SIGPIPE, see this link for more info:
+ * http://www.mail-archive.com/libevent-users@monkey.org/msg01606.html */
+ sa.sa_handler = SIG_IGN;
+ sa.sa_flags = 0;
+ if (sigemptyset(&sa.sa_mask) == -1 ||
+ sigaction(SIGPIPE, &sa, 0) == -1) {
+ perror("failed to ignore SIGPIPE; sigaction");
+ return 1;
+ }
+
+ ivshmem_client_cmdline_help();
+ printf("cmd> ");
+ fflush(stdout);
+
+ if (ivshmem_client_init(&client, args.unix_sock_path,
+ ivshmem_client_notification_cb, NULL,
+ args.verbose) < 0) {
+ fprintf(stderr, "cannot init client\n");
+ return 1;
+ }
+
+ while (1) {
+ if (ivshmem_client_connect(&client) < 0) {
+ fprintf(stderr, "cannot connect to server, retry in 1 second\n");
+ sleep(1);
+ continue;
+ }
+
+ fprintf(stdout, "listen on server socket %d\n", client.sock_fd);
+
+ if (ivshmem_client_poll_events(&client) == 0) {
+ continue;
+ }
+
+ /* disconnected from server, reset all peers */
+ fprintf(stdout, "disconnected from server\n");
+
+ ivshmem_client_close(&client);
+ }
+
+ return 0;
+}
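
A sample interactive session with this tool, once a second client has connected to the same server (peer ids and fd numbers below are illustrative):

    cmd> dump
    our_id = 0
     vector 0 is enabled (fd=5)
    peer_id = 1
     vector 0 is enabled (fd=6)
    cmd> int 1 0
    cmd> int all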
diff --git a/qemu/contrib/ivshmem-server/Makefile.objs b/qemu/contrib/ivshmem-server/Makefile.objs
new file mode 100644
index 000000000..c060dd369
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/Makefile.objs
@@ -0,0 +1 @@
+ivshmem-server-obj-y = ivshmem-server.o main.o
diff --git a/qemu/contrib/ivshmem-server/ivshmem-server.c b/qemu/contrib/ivshmem-server/ivshmem-server.c
new file mode 100644
index 000000000..172db78b3
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/ivshmem-server.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/sockets.h"
+
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "ivshmem-server.h"
+
+/* log a message on stdout if verbose=1 */
+#define IVSHMEM_SERVER_DEBUG(server, fmt, ...) do { \
+ if ((server)->verbose) { \
+ printf(fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/** maximum size of a huge page, used by ivshmem_server_ftruncate() */
+#define IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE (1024 * 1024 * 1024)
+
+/** default listen backlog (maximum number of pending connections) */
+#define IVSHMEM_SERVER_LISTEN_BACKLOG 10
+
+/* send message to a client unix socket */
+static int
+ivshmem_server_send_one_msg(int sock_fd, int64_t peer_id, int fd)
+{
+ int ret;
+ struct msghdr msg;
+ struct iovec iov[1];
+ union {
+ struct cmsghdr cmsg;
+ char control[CMSG_SPACE(sizeof(int))];
+ } msg_control;
+ struct cmsghdr *cmsg;
+
+ peer_id = GINT64_TO_LE(peer_id);
+ iov[0].iov_base = &peer_id;
+ iov[0].iov_len = sizeof(peer_id);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 1;
+
+ /* if fd is specified, add it in a cmsg */
+ if (fd >= 0) {
+ memset(&msg_control, 0, sizeof(msg_control));
+ msg.msg_control = &msg_control;
+ msg.msg_controllen = sizeof(msg_control);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
+ }
+
+ ret = sendmsg(sock_fd, &msg, 0);
+ if (ret <= 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* free a peer when it disconnects or when the server is freed */
+static void
+ivshmem_server_free_peer(IvshmemServer *server, IvshmemServerPeer *peer)
+{
+ unsigned vector;
+ IvshmemServerPeer *other_peer;
+
+ IVSHMEM_SERVER_DEBUG(server, "free peer %" PRId64 "\n", peer->id);
+ close(peer->sock_fd);
+ QTAILQ_REMOVE(&server->peer_list, peer, next);
+
+ /* advertise the deletion to other peers */
+ QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
+ ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id, -1);
+ }
+
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ event_notifier_cleanup(&peer->vectors[vector]);
+ }
+
+ g_free(peer);
+}
+
+/* send the peer id and the shm_fd just after a new client connection */
+static int
+ivshmem_server_send_initial_info(IvshmemServer *server, IvshmemServerPeer *peer)
+{
+ int ret;
+
+ /* send our protocol version first */
+ ret = ivshmem_server_send_one_msg(peer->sock_fd, IVSHMEM_PROTOCOL_VERSION,
+ -1);
+ if (ret < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send version: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /* send the peer id to the client */
+ ret = ivshmem_server_send_one_msg(peer->sock_fd, peer->id, -1);
+ if (ret < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send peer id: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /* send the shm_fd */
+ ret = ivshmem_server_send_one_msg(peer->sock_fd, -1, server->shm_fd);
+ if (ret < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send shm fd: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* handle message on listening unix socket (new client connection) */
+static int
+ivshmem_server_handle_new_conn(IvshmemServer *server)
+{
+ IvshmemServerPeer *peer, *other_peer;
+ struct sockaddr_un unaddr;
+ socklen_t unaddr_len;
+ int newfd;
+ unsigned i;
+
+ /* accept the incoming connection */
+ unaddr_len = sizeof(unaddr);
+ newfd = qemu_accept(server->sock_fd,
+ (struct sockaddr *)&unaddr, &unaddr_len);
+
+ if (newfd < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot accept() %s\n", strerror(errno));
+ return -1;
+ }
+
+ qemu_set_nonblock(newfd);
+ IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd);
+
+ /* allocate new structure for this peer */
+ peer = g_malloc0(sizeof(*peer));
+ peer->sock_fd = newfd;
+
+ /* get an unused peer id */
+ /* XXX: this could use id allocation such as Linux IDA, or simply
+ * a free-list */
+ for (i = 0; i < G_MAXUINT16; i++) {
+ if (ivshmem_server_search_peer(server, server->cur_id) == NULL) {
+ break;
+ }
+ server->cur_id++;
+ }
+ if (i == G_MAXUINT16) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot allocate new client id\n");
+ close(newfd);
+ g_free(peer);
+ return -1;
+ }
+ peer->id = server->cur_id++;
+
+ /* create eventfd, one per vector */
+ peer->vectors_count = server->n_vectors;
+ for (i = 0; i < peer->vectors_count; i++) {
+ if (event_notifier_init(&peer->vectors[i], FALSE) < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot create eventfd\n");
+ goto fail;
+ }
+ }
+
+ /* send peer id and shm fd */
+ if (ivshmem_server_send_initial_info(server, peer) < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send initial info\n");
+ goto fail;
+ }
+
+ /* advertise the new peer to others */
+ QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
+ for (i = 0; i < peer->vectors_count; i++) {
+ ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id,
+ peer->vectors[i].wfd);
+ }
+ }
+
+ /* advertise the other peers to the new one */
+ QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
+ for (i = 0; i < peer->vectors_count; i++) {
+ ivshmem_server_send_one_msg(peer->sock_fd, other_peer->id,
+ other_peer->vectors[i].wfd);
+ }
+ }
+
+ /* advertise the new peer to itself */
+ for (i = 0; i < peer->vectors_count; i++) {
+ ivshmem_server_send_one_msg(peer->sock_fd, peer->id,
+ event_notifier_get_fd(&peer->vectors[i]));
+ }
+
+ QTAILQ_INSERT_TAIL(&server->peer_list, peer, next);
+ IVSHMEM_SERVER_DEBUG(server, "new peer id = %" PRId64 "\n",
+ peer->id);
+ return 0;
+
+fail:
+ while (i--) {
+ event_notifier_cleanup(&peer->vectors[i]);
+ }
+ close(newfd);
+ g_free(peer);
+ return -1;
+}
+
+/* Try to ftruncate a file to the next power of 2 of shmsize.
+ * If that fails, all powers of 2 above shmsize are tested until
+ * we reach the maximum huge page size. This is useful
+ * if the shm file is in a hugetlbfs that cannot be truncated to the
+ * shm_size value. */
+static int
+ivshmem_server_ftruncate(int fd, unsigned shmsize)
+{
+ int ret;
+ struct stat mapstat;
+
+ /* align shmsize to next power of 2 */
+ shmsize = pow2ceil(shmsize);
+
+ if (fstat(fd, &mapstat) != -1 && mapstat.st_size == shmsize) {
+ return 0;
+ }
+
+ while (shmsize <= IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE) {
+ ret = ftruncate(fd, shmsize);
+ if (ret == 0) {
+ return ret;
+ }
+ shmsize *= 2;
+ }
+
+ return -1;
+}
+
+/* Init a new ivshmem server */
+int
+ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
+ const char *shm_path, bool use_shm_open,
+ size_t shm_size, unsigned n_vectors,
+ bool verbose)
+{
+ int ret;
+
+ memset(server, 0, sizeof(*server));
+ server->verbose = verbose;
+
+ ret = snprintf(server->unix_sock_path, sizeof(server->unix_sock_path),
+ "%s", unix_sock_path);
+ if (ret < 0 || ret >= sizeof(server->unix_sock_path)) {
+ IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
+ return -1;
+ }
+ ret = snprintf(server->shm_path, sizeof(server->shm_path),
+ "%s", shm_path);
+ if (ret < 0 || ret >= sizeof(server->shm_path)) {
+ IVSHMEM_SERVER_DEBUG(server, "could not copy shm path\n");
+ return -1;
+ }
+
+ server->use_shm_open = use_shm_open;
+ server->shm_size = shm_size;
+ server->n_vectors = n_vectors;
+
+ QTAILQ_INIT(&server->peer_list);
+
+ return 0;
+}
+
+/* open shm, create and bind to the unix socket */
+int
+ivshmem_server_start(IvshmemServer *server)
+{
+ struct sockaddr_un sun;
+ int shm_fd, sock_fd, ret;
+
+ /* open shm file */
+ if (server->use_shm_open) {
+ IVSHMEM_SERVER_DEBUG(server, "Using POSIX shared memory: %s\n",
+ server->shm_path);
+ shm_fd = shm_open(server->shm_path, O_CREAT | O_RDWR, S_IRWXU);
+ } else {
+ gchar *filename = g_strdup_printf("%s/ivshmem.XXXXXX", server->shm_path);
+ IVSHMEM_SERVER_DEBUG(server, "Using file-backed shared memory: %s\n",
+ server->shm_path);
+ shm_fd = mkstemp(filename);
+ unlink(filename);
+ g_free(filename);
+ }
+
+ if (shm_fd < 0) {
+ fprintf(stderr, "cannot open shm file %s: %s\n", server->shm_path,
+ strerror(errno));
+ return -1;
+ }
+ if (ivshmem_server_ftruncate(shm_fd, server->shm_size) < 0) {
+ fprintf(stderr, "ftruncate(%s) failed: %s\n", server->shm_path,
+ strerror(errno));
+ goto err_close_shm;
+ }
+
+ IVSHMEM_SERVER_DEBUG(server, "create & bind socket %s\n",
+ server->unix_sock_path);
+
+ /* create the unix listening socket */
+ sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock_fd < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot create socket: %s\n",
+ strerror(errno));
+ goto err_close_shm;
+ }
+
+ sun.sun_family = AF_UNIX;
+ ret = snprintf(sun.sun_path, sizeof(sun.sun_path), "%s",
+ server->unix_sock_path);
+ if (ret < 0 || ret >= sizeof(sun.sun_path)) {
+ IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
+ goto err_close_sock;
+ }
+ if (bind(sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot connect to %s: %s\n", sun.sun_path,
+ strerror(errno));
+ goto err_close_sock;
+ }
+
+ if (listen(sock_fd, IVSHMEM_SERVER_LISTEN_BACKLOG) < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "listen() failed: %s\n", strerror(errno));
+ goto err_close_sock;
+ }
+
+ server->sock_fd = sock_fd;
+ server->shm_fd = shm_fd;
+
+ return 0;
+
+err_close_sock:
+ close(sock_fd);
+err_close_shm:
+ close(shm_fd);
+ return -1;
+}
+
+/* close connections to clients, the unix socket and the shm fd */
+void
+ivshmem_server_close(IvshmemServer *server)
+{
+ IvshmemServerPeer *peer, *npeer;
+
+ IVSHMEM_SERVER_DEBUG(server, "close server\n");
+
+ QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, npeer) {
+ ivshmem_server_free_peer(server, peer);
+ }
+
+ unlink(server->unix_sock_path);
+ close(server->sock_fd);
+ close(server->shm_fd);
+ server->sock_fd = -1;
+ server->shm_fd = -1;
+}
+
+/* get the fd_set according to the unix socket and the peer list */
+void
+ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd)
+{
+ IvshmemServerPeer *peer;
+
+ if (server->sock_fd == -1) {
+ return;
+ }
+
+ FD_SET(server->sock_fd, fds);
+ if (server->sock_fd >= *maxfd) {
+ *maxfd = server->sock_fd + 1;
+ }
+
+ QTAILQ_FOREACH(peer, &server->peer_list, next) {
+ FD_SET(peer->sock_fd, fds);
+ if (peer->sock_fd >= *maxfd) {
+ *maxfd = peer->sock_fd + 1;
+ }
+ }
+}
+
+/* process incoming messages on the sockets in fd_set */
+int
+ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd)
+{
+ IvshmemServerPeer *peer, *peer_next;
+
+ if (server->sock_fd < maxfd && FD_ISSET(server->sock_fd, fds) &&
+ ivshmem_server_handle_new_conn(server) < 0 && errno != EINTR) {
+ IVSHMEM_SERVER_DEBUG(server, "ivshmem_server_handle_new_conn() "
+ "failed\n");
+ return -1;
+ }
+
+ QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, peer_next) {
+ /* any message from a peer socket results in a close() */
+ IVSHMEM_SERVER_DEBUG(server, "peer->sock_fd=%d\n", peer->sock_fd);
+ if (peer->sock_fd < maxfd && FD_ISSET(peer->sock_fd, fds)) {
+ ivshmem_server_free_peer(server, peer);
+ }
+ }
+
+ return 0;
+}
+
+/* lookup peer from its id */
+IvshmemServerPeer *
+ivshmem_server_search_peer(IvshmemServer *server, int64_t peer_id)
+{
+ IvshmemServerPeer *peer;
+
+ QTAILQ_FOREACH(peer, &server->peer_list, next) {
+ if (peer->id == peer_id) {
+ return peer;
+ }
+ }
+ return NULL;
+}
+
+/* dump the list of peers and their vectors on stdout */
+void
+ivshmem_server_dump(const IvshmemServer *server)
+{
+ const IvshmemServerPeer *peer;
+ unsigned vector;
+
+ /* dump peers */
+ QTAILQ_FOREACH(peer, &server->peer_list, next) {
+ printf("peer_id = %" PRId64 "\n", peer->id);
+
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ printf(" vector %d is enabled (fd=%d)\n", vector,
+ event_notifier_get_fd(&peer->vectors[vector]));
+ }
+ }
+}
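
To make the message flow above concrete, here is a tiny sketch of the (peer_id, fd) state machine every client has to implement to consume what ivshmem_server_handle_new_conn() sends. The table sizes and the replayed values are made up for illustration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PEERS   8  /* illustrative bounds, not from the patch */
    #define MAX_VECTORS 4

    static int peer_nvec[MAX_PEERS]; /* vectors seen so far, per peer id */

    /* one (peer_id, fd) message, as sent by ivshmem_server_send_one_msg():
     * fd == -1 means "this peer disconnected"; fd >= 0 carries one more
     * eventfd (interrupt vector) for that peer */
    static void handle_msg(int64_t id, int fd)
    {
        assert(id >= 0 && id < MAX_PEERS);
        if (fd == -1) {
            printf("peer %ld gone\n", (long)id);
            peer_nvec[id] = 0;
        } else {
            assert(peer_nvec[id] < MAX_VECTORS);
            printf("peer %ld vector %d -> fd %d\n",
                   (long)id, peer_nvec[id]++, fd);
        }
    }

    int main(void)
    {
        /* replay the sequence a server with n_vectors=2 would send when
         * peer 1 joins and later leaves (fd values are made up) */
        handle_msg(1, 5);
        handle_msg(1, 6);
        handle_msg(1, -1);
        return 0;
    }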
diff --git a/qemu/contrib/ivshmem-server/ivshmem-server.h b/qemu/contrib/ivshmem-server/ivshmem-server.h
new file mode 100644
index 000000000..385163961
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/ivshmem-server.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef _IVSHMEM_SERVER_H_
+#define _IVSHMEM_SERVER_H_
+
+/**
+ * The ivshmem server is a daemon that creates a unix socket in listen
+ * mode. The ivshmem clients (qemu or ivshmem-client) connect to this
+ * unix socket. For each client, the server will create some eventfds
+ * (see eventfd(2)), one per vector. These fds are transmitted to all
+ * clients using the SCM_RIGHTS cmsg message. Therefore, each client is
+ * able to send a notification to another client without being
+ * proxied by the server.
+ *
+ * We use this mechanism to send interrupts between guests.
+ * qemu is able to transform an event on an eventfd into a PCI MSI-X
+ * interrupt in the guest.
+ *
+ * The ivshmem server is also able to share the file descriptor
+ * associated to the ivshmem shared memory.
+ */
+
+#include <sys/select.h>
+
+#include "qemu/event_notifier.h"
+#include "qemu/queue.h"
+#include "hw/misc/ivshmem.h"
+
+/**
+ * Maximum number of notification vectors supported by the server
+ */
+#define IVSHMEM_SERVER_MAX_VECTORS 64
+
+/**
+ * Structure storing a peer
+ *
+ * Each time a client connects to an ivshmem server, a new
+ * IvshmemServerPeer structure is created. This peer and all its
+ * vectors are advertised to all connected clients through the connected
+ * unix sockets.
+ */
+typedef struct IvshmemServerPeer {
+ QTAILQ_ENTRY(IvshmemServerPeer) next; /**< next in list */
+ int sock_fd; /**< connected unix sock */
+ int64_t id; /**< the id of the peer */
+ EventNotifier vectors[IVSHMEM_SERVER_MAX_VECTORS]; /**< one per vector */
+ unsigned vectors_count; /**< number of vectors */
+} IvshmemServerPeer;
+QTAILQ_HEAD(IvshmemServerPeerList, IvshmemServerPeer);
+
+typedef struct IvshmemServerPeerList IvshmemServerPeerList;
+
+/**
+ * Structure describing an ivshmem server
+ *
+ * This structure stores all information related to our server: the name
+ * of the server unix socket and the list of connected peers.
+ */
+typedef struct IvshmemServer {
+ char unix_sock_path[PATH_MAX]; /**< path to unix socket */
+ int sock_fd; /**< unix sock file descriptor */
+ char shm_path[PATH_MAX]; /**< path to shm */
+ bool use_shm_open;
+ size_t shm_size; /**< size of shm */
+ int shm_fd; /**< shm file descriptor */
+ unsigned n_vectors; /**< number of vectors */
+ uint16_t cur_id; /**< id to be given to next client */
+ bool verbose; /**< true in verbose mode */
+ IvshmemServerPeerList peer_list; /**< list of peers */
+} IvshmemServer;
+
+/**
+ * Initialize an ivshmem server
+ *
+ * @server: A pointer to an uninitialized IvshmemServer structure
+ * @unix_sock_path: The pointer to the unix socket file name
+ * @shm_path: Path to the shared memory. The path corresponds to a POSIX
+ * shm name or a hugetlbfs mount point.
+ * @use_shm_open: True to open the shared memory with shm_open(3); false
+ * to create a backing file in the shm_path directory
+ * @shm_size: Size of shared memory
+ * @n_vectors: Number of interrupt vectors per client
+ * @verbose: True to enable verbose mode
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int
+ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
+ const char *shm_path, bool use_shm_open,
+ size_t shm_size, unsigned n_vectors,
+ bool verbose);
+
+/**
+ * Open the shm, then create and bind to the unix socket
+ *
+ * @server: The pointer to the initialized IvshmemServer structure
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_server_start(IvshmemServer *server);
+
+/**
+ * Close the server
+ *
+ * Close connections to all clients, close the unix socket and the
+ * shared memory file descriptor. The structure remains initialized, so
+ * it is possible to call ivshmem_server_start() again after a call to
+ * ivshmem_server_close().
+ *
+ * @server: The ivshmem server
+ */
+void ivshmem_server_close(IvshmemServer *server);
+
+/**
+ * Fill a fd_set with file descriptors to be monitored
+ *
+ * This function will fill a fd_set with all file descriptors that must
+ * be polled (unix server socket and peers unix socket). The function
+ * will not initialize the fd_set, it is up to the caller to do it.
+ *
+ * @server: The ivshmem server
+ * @fds: The fd_set to be updated
+ * @maxfd: Must be set to the max file descriptor + 1 in fd_set. This value is
+ * updated if this function adds a greater fd in fd_set.
+ */
+void
+ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd);
+
+/**
+ * Read and handle new messages
+ *
+ * Given a fd_set (for instance filled by a call to select()), handle
+ * incoming messages from peers.
+ *
+ * @server: The ivshmem server
+ * @fds: The fd_set containing the file descriptors to be checked. Note that
+ * file descriptors that are not related to our server are ignored.
+ * @maxfd: The maximum fd in fd_set, plus one.
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd);
+
+/**
+ * Search for a peer by its identifier
+ *
+ * @server: The ivshmem server
+ * @peer_id: The identifier of the peer structure
+ *
+ * Returns: The peer structure, or NULL if not found
+ */
+IvshmemServerPeer *
+ivshmem_server_search_peer(IvshmemServer *server, int64_t peer_id);
+
+/**
+ * Dump information of this ivshmem server and its peers on stdout
+ *
+ * @server: The ivshmem server
+ */
+void ivshmem_server_dump(const IvshmemServer *server);
+
+#endif /* _IVSHMEM_SERVER_H_ */
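
Since the whole notification path rests on eventfd(2) semantics, a self-contained sketch of the primitive may help: the notifier write()s an 8-byte counter increment, and the notified side read()s the accumulated count back. This is standard Linux eventfd usage, not code from this patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
        uint64_t kick = 1, got = 0;
        int fd = eventfd(0, 0);

        if (fd < 0) {
            perror("eventfd");
            return 1;
        }
        /* the notifying side writes an 8-byte counter increment... */
        if (write(fd, &kick, sizeof(kick)) != sizeof(kick)) {
            perror("write");
            return 1;
        }
        /* ...the notified side reads the accumulated count back */
        if (read(fd, &got, sizeof(got)) != sizeof(got)) {
            perror("read");
            return 1;
        }
        printf("received %llu notification(s)\n", (unsigned long long)got);
        close(fd);
        return 0;
    }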
diff --git a/qemu/contrib/ivshmem-server/main.c b/qemu/contrib/ivshmem-server/main.c
new file mode 100644
index 000000000..45776d8af
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/main.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+
+#include "ivshmem-server.h"
+
+#define IVSHMEM_SERVER_DEFAULT_VERBOSE 0
+#define IVSHMEM_SERVER_DEFAULT_FOREGROUND 0
+#define IVSHMEM_SERVER_DEFAULT_PID_FILE "/var/run/ivshmem-server.pid"
+#define IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
+#define IVSHMEM_SERVER_DEFAULT_SHM_PATH "ivshmem"
+#define IVSHMEM_SERVER_DEFAULT_SHM_SIZE (4*1024*1024)
+#define IVSHMEM_SERVER_DEFAULT_N_VECTORS 1
+
+/* used to quit on signal SIGTERM */
+static int ivshmem_server_quit;
+
+/* arguments given by the user */
+typedef struct IvshmemServerArgs {
+ bool verbose;
+ bool foreground;
+ const char *pid_file;
+ const char *unix_socket_path;
+ const char *shm_path;
+ bool use_shm_open;
+ uint64_t shm_size;
+ unsigned n_vectors;
+} IvshmemServerArgs;
+
+static void
+ivshmem_server_usage(const char *progname)
+{
+ printf("Usage: %s [OPTION]...\n"
+ " -h: show this help\n"
+ " -v: verbose mode\n"
+ " -F: foreground mode (default is to daemonize)\n"
+ " -p <pid-file>: path to the PID file (used in daemon mode only)\n"
+ " default " IVSHMEM_SERVER_DEFAULT_PID_FILE "\n"
+ " -S <unix-socket-path>: path to the unix socket to listen to\n"
+ " default " IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH "\n"
+ " -M <shm-name>: POSIX shared memory object to use\n"
+ " default " IVSHMEM_SERVER_DEFAULT_SHM_PATH "\n"
+ " -m <dir-name>: where to create shared memory\n"
+ " -l <size>: size of shared memory in bytes\n"
+ " suffixes K, M and G can be used, e.g. 1K means 1024\n"
+ " default %u\n"
+ " -n <nvectors>: number of vectors\n"
+ " default %u\n",
+ progname, IVSHMEM_SERVER_DEFAULT_SHM_SIZE,
+ IVSHMEM_SERVER_DEFAULT_N_VECTORS);
+}
+
+static void
+ivshmem_server_help(const char *progname)
+{
+ fprintf(stderr, "Try '%s -h' for more information.\n", progname);
+}
+
+/* parse the program arguments, exit on error */
+static void
+ivshmem_server_parse_args(IvshmemServerArgs *args, int argc, char *argv[])
+{
+ int c;
+ unsigned long long v;
+ Error *err = NULL;
+
+ while ((c = getopt(argc, argv, "hvFp:S:m:M:l:n:")) != -1) {
+
+ switch (c) {
+ case 'h': /* help */
+ ivshmem_server_usage(argv[0]);
+ exit(0);
+ break;
+
+ case 'v': /* verbose */
+ args->verbose = 1;
+ break;
+
+ case 'F': /* foreground */
+ args->foreground = 1;
+ break;
+
+ case 'p': /* pid file */
+ args->pid_file = optarg;
+ break;
+
+ case 'S': /* unix socket path */
+ args->unix_socket_path = optarg;
+ break;
+
+ case 'M': /* shm name */
+ case 'm': /* dir name */
+ args->shm_path = optarg;
+ args->use_shm_open = c == 'M';
+ break;
+
+ case 'l': /* shm size */
+ parse_option_size("shm_size", optarg, &args->shm_size, &err);
+ if (err) {
+ error_report_err(err);
+ ivshmem_server_help(argv[0]);
+ exit(1);
+ }
+ break;
+
+ case 'n': /* number of vectors */
+ if (parse_uint_full(optarg, &v, 0) < 0) {
+ fprintf(stderr, "cannot parse n_vectors\n");
+ ivshmem_server_help(argv[0]);
+ exit(1);
+ }
+ args->n_vectors = v;
+ break;
+
+ default:
+ ivshmem_server_usage(argv[0]);
+ exit(1);
+ break;
+ }
+ }
+
+ if (args->n_vectors > IVSHMEM_SERVER_MAX_VECTORS) {
+ fprintf(stderr, "too many requested vectors (max is %d)\n",
+ IVSHMEM_SERVER_MAX_VECTORS);
+ ivshmem_server_help(argv[0]);
+ exit(1);
+ }
+
+ if (args->verbose == 1 && args->foreground == 0) {
+ fprintf(stderr, "cannot use verbose in daemon mode\n");
+ ivshmem_server_help(argv[0]);
+ exit(1);
+ }
+}
+
+/* wait for events on listening server unix socket and connected client
+ * sockets */
+static int
+ivshmem_server_poll_events(IvshmemServer *server)
+{
+ fd_set fds;
+ int ret = 0, maxfd;
+
+ while (!ivshmem_server_quit) {
+
+ FD_ZERO(&fds);
+ maxfd = 0;
+ ivshmem_server_get_fds(server, &fds, &maxfd);
+
+ ret = select(maxfd, &fds, NULL, NULL, NULL);
+
+ if (ret < 0) {
+ if (errno == EINTR) {
+ continue;
+ }
+
+ fprintf(stderr, "select error: %s\n", strerror(errno));
+ break;
+ }
+ if (ret == 0) {
+ continue;
+ }
+
+ if (ivshmem_server_handle_fds(server, &fds, maxfd) < 0) {
+ fprintf(stderr, "ivshmem_server_handle_fds() failed\n");
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void
+ivshmem_server_quit_cb(int signum)
+{
+ ivshmem_server_quit = 1;
+}
+
+int
+main(int argc, char *argv[])
+{
+ IvshmemServer server;
+ struct sigaction sa, sa_quit;
+ IvshmemServerArgs args = {
+ .verbose = IVSHMEM_SERVER_DEFAULT_VERBOSE,
+ .foreground = IVSHMEM_SERVER_DEFAULT_FOREGROUND,
+ .pid_file = IVSHMEM_SERVER_DEFAULT_PID_FILE,
+ .unix_socket_path = IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH,
+ .shm_path = IVSHMEM_SERVER_DEFAULT_SHM_PATH,
+ .use_shm_open = true,
+ .shm_size = IVSHMEM_SERVER_DEFAULT_SHM_SIZE,
+ .n_vectors = IVSHMEM_SERVER_DEFAULT_N_VECTORS,
+ };
+ int ret = 1;
+
+ /*
+ * Do not remove this notice without adding proper error handling!
+ * Start with handling ivshmem_server_send_one_msg() failure.
+ */
+ printf("*** Example code, do not use in production ***\n");
+
+ /* parse arguments, will exit on error */
+ ivshmem_server_parse_args(&args, argc, argv);
+
+ /* Ignore SIGPIPE, see this link for more info:
+ * http://www.mail-archive.com/libevent-users@monkey.org/msg01606.html */
+ sa.sa_handler = SIG_IGN;
+ sa.sa_flags = 0;
+ if (sigemptyset(&sa.sa_mask) == -1 ||
+ sigaction(SIGPIPE, &sa, 0) == -1) {
+ perror("failed to ignore SIGPIPE; sigaction");
+ goto err;
+ }
+
+ sa_quit.sa_handler = ivshmem_server_quit_cb;
+ sa_quit.sa_flags = 0;
+ if (sigemptyset(&sa_quit.sa_mask) == -1 ||
+ sigaction(SIGTERM, &sa_quit, 0) == -1) {
+ perror("failed to add SIGTERM handler; sigaction");
+ goto err;
+ }
+
+ /* init the ivshmem server structure */
+ if (ivshmem_server_init(&server, args.unix_socket_path,
+ args.shm_path, args.use_shm_open,
+ args.shm_size, args.n_vectors, args.verbose) < 0) {
+ fprintf(stderr, "cannot init server\n");
+ goto err;
+ }
+
+ /* start the ivshmem server (open shm & unix socket) */
+ if (ivshmem_server_start(&server) < 0) {
+ fprintf(stderr, "cannot bind\n");
+ goto err;
+ }
+
+ /* daemonize if asked to */
+ if (!args.foreground) {
+ FILE *fp;
+
+ if (qemu_daemon(1, 1) < 0) {
+ fprintf(stderr, "cannot daemonize: %s\n", strerror(errno));
+ goto err_close;
+ }
+
+ /* write pid file */
+ fp = fopen(args.pid_file, "w");
+ if (fp == NULL) {
+ fprintf(stderr, "cannot write pid file: %s\n", strerror(errno));
+ goto err_close;
+ }
+
+ fprintf(fp, "%d\n", (int) getpid());
+ fclose(fp);
+ }
+
+ ivshmem_server_poll_events(&server);
+ fprintf(stdout, "server disconnected\n");
+ ret = 0;
+
+err_close:
+ ivshmem_server_close(&server);
+err:
+ return ret;
+}
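
For reference, a typical way to exercise this pair of tools: start the server in the foreground, attach one or more ivshmem-client processes, and point a guest at the same socket. The server and client flags below come from the usage texts above; the QEMU device syntax is our assumption of the matching qemu-2.6 side (ivshmem-doorbell), not part of this patch:

    ivshmem-server -F -v -S /tmp/ivshmem_socket -M ivshmem -l 4M -n 1
    ivshmem-client -v -S /tmp/ivshmem_socket
    qemu-system-x86_64 ... \
        -chardev socket,path=/tmp/ivshmem_socket,id=ivsh \
        -device ivshmem-doorbell,chardev=ivsh,vectors=1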