summaryrefslogtreecommitdiffstats
path: root/qemu/contrib/ivshmem-server
diff options
context:
space:
mode:
authorJosé Pekkarinen <jose.pekkarinen@nokia.com>2016-05-18 13:18:31 +0300
committerJosé Pekkarinen <jose.pekkarinen@nokia.com>2016-05-18 13:42:15 +0300
commit437fd90c0250dee670290f9b714253671a990160 (patch)
treeb871786c360704244a07411c69fb58da9ead4a06 /qemu/contrib/ivshmem-server
parent5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.
Collission happened in the following patches: migration: do cleanup operation after completion(738df5b9) Bug fix.(1750c932f86) kvmclock: add a new function to update env->tsc.(b52baab2) The code provided by the patches was already in the upstreamed version. Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/contrib/ivshmem-server')
-rw-r--r--qemu/contrib/ivshmem-server/Makefile.objs1
-rw-r--r--qemu/contrib/ivshmem-server/ivshmem-server.c457
-rw-r--r--qemu/contrib/ivshmem-server/ivshmem-server.h166
-rw-r--r--qemu/contrib/ivshmem-server/main.c273
4 files changed, 897 insertions, 0 deletions
diff --git a/qemu/contrib/ivshmem-server/Makefile.objs b/qemu/contrib/ivshmem-server/Makefile.objs
new file mode 100644
index 000000000..c060dd369
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/Makefile.objs
@@ -0,0 +1 @@
+ivshmem-server-obj-y = ivshmem-server.o main.o
diff --git a/qemu/contrib/ivshmem-server/ivshmem-server.c b/qemu/contrib/ivshmem-server/ivshmem-server.c
new file mode 100644
index 000000000..172db78b3
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/ivshmem-server.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/sockets.h"
+
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "ivshmem-server.h"
+
/* log a message on stdout if verbose=1 */
/* printf-style macro; "## __VA_ARGS__" is a GNU extension that swallows the
 * trailing comma when the macro is invoked with no variadic arguments. */
#define IVSHMEM_SERVER_DEBUG(server, fmt, ...) do { \
        if ((server)->verbose) { \
            printf(fmt, ## __VA_ARGS__); \
        } \
    } while (0)

/** maximum size of a huge page, used by ivshmem_server_ftruncate() */
#define IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE (1024 * 1024 * 1024)

/** default listen backlog (number of sockets not accepted) */
#define IVSHMEM_SERVER_LISTEN_BACKLOG 10
+
/* send message to a client unix socket */
/*
 * Send one message on @sock_fd: a fixed 8-byte peer id in little-endian
 * byte order, optionally carrying a file descriptor.
 *
 * @sock_fd: connected AF_UNIX stream socket
 * @peer_id: value transmitted in the 8-byte payload
 * @fd:      when >= 0, attached as an SCM_RIGHTS control message so the
 *           receiver obtains its own copy of the descriptor; when < 0,
 *           no ancillary data is sent
 *
 * Returns 0 on success, -1 on sendmsg() failure (errno set by sendmsg).
 */
static int
ivshmem_server_send_one_msg(int sock_fd, int64_t peer_id, int fd)
{
    int ret;
    struct msghdr msg;
    struct iovec iov[1];
    /* the union guarantees correct alignment for the cmsg header while
     * reserving CMSG_SPACE bytes for one int (the fd) */
    union {
        struct cmsghdr cmsg;
        char control[CMSG_SPACE(sizeof(int))];
    } msg_control;
    struct cmsghdr *cmsg;

    peer_id = GINT64_TO_LE(peer_id); /* fixed little-endian wire format */
    iov[0].iov_base = &peer_id;
    iov[0].iov_len = sizeof(peer_id);

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = iov;
    msg.msg_iovlen = 1;

    /* if fd is specified, add it in a cmsg */
    if (fd >= 0) {
        memset(&msg_control, 0, sizeof(msg_control));
        msg.msg_control = &msg_control;
        msg.msg_controllen = sizeof(msg_control);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
    }

    ret = sendmsg(sock_fd, &msg, 0);
    if (ret <= 0) {
        return -1;
    }

    return 0;
}
+
/* free a peer when the server advertises a disconnection or when the
 * server is freed */
/*
 * Tear down @peer: close its socket, unlink it from the server's peer
 * list, notify every remaining peer of the disconnection (a message with
 * the departing peer's id and no fd), release the per-vector event
 * notifiers, then free the structure.
 */
static void
ivshmem_server_free_peer(IvshmemServer *server, IvshmemServerPeer *peer)
{
    unsigned vector;
    IvshmemServerPeer *other_peer;

    IVSHMEM_SERVER_DEBUG(server, "free peer %" PRId64 "\n", peer->id);
    close(peer->sock_fd);
    QTAILQ_REMOVE(&server->peer_list, peer, next);

    /* advertise the deletion to other peers */
    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
        ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id, -1);
    }

    for (vector = 0; vector < peer->vectors_count; vector++) {
        event_notifier_cleanup(&peer->vectors[vector]);
    }

    g_free(peer);
}
+
+/* send the peer id and the shm_fd just after a new client connection */
+static int
+ivshmem_server_send_initial_info(IvshmemServer *server, IvshmemServerPeer *peer)
+{
+ int ret;
+
+ /* send our protocol version first */
+ ret = ivshmem_server_send_one_msg(peer->sock_fd, IVSHMEM_PROTOCOL_VERSION,
+ -1);
+ if (ret < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send version: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /* send the peer id to the client */
+ ret = ivshmem_server_send_one_msg(peer->sock_fd, peer->id, -1);
+ if (ret < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send peer id: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /* send the shm_fd */
+ ret = ivshmem_server_send_one_msg(peer->sock_fd, -1, server->shm_fd);
+ if (ret < 0) {
+ IVSHMEM_SERVER_DEBUG(server, "cannot send shm fd: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
/* handle message on listening unix socket (new client connection) */
/*
 * Accept a new client on the listening socket, allocate it a free 16-bit
 * id, create one eventfd-backed notifier per interrupt vector, run the
 * initial handshake, and cross-advertise vectors between the new peer and
 * all existing ones.  Returns 0 on success, -1 on failure (everything
 * acquired for the new peer is rolled back).
 */
static int
ivshmem_server_handle_new_conn(IvshmemServer *server)
{
    IvshmemServerPeer *peer, *other_peer;
    struct sockaddr_un unaddr;
    socklen_t unaddr_len;
    int newfd;
    unsigned i;

    /* accept the incoming connection */
    unaddr_len = sizeof(unaddr);
    newfd = qemu_accept(server->sock_fd,
                        (struct sockaddr *)&unaddr, &unaddr_len);

    if (newfd < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot accept() %s\n", strerror(errno));
        return -1;
    }

    qemu_set_nonblock(newfd);
    IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd);

    /* allocate new structure for this peer */
    peer = g_malloc0(sizeof(*peer));
    peer->sock_fd = newfd;

    /* get an unused peer id */
    /* XXX: this could use id allocation such as Linux IDA, or simply
     * a free-list */
    /* scan at most 2^16 candidates starting at cur_id; candidate ids wrap
     * at 16 bits because cur_id is uint16_t */
    for (i = 0; i < G_MAXUINT16; i++) {
        if (ivshmem_server_search_peer(server, server->cur_id) == NULL) {
            break;
        }
        server->cur_id++;
    }
    if (i == G_MAXUINT16) {
        IVSHMEM_SERVER_DEBUG(server, "cannot allocate new client id\n");
        close(newfd);
        g_free(peer);
        return -1;
    }
    peer->id = server->cur_id++;

    /* create eventfd, one per vector */
    peer->vectors_count = server->n_vectors;
    for (i = 0; i < peer->vectors_count; i++) {
        if (event_notifier_init(&peer->vectors[i], FALSE) < 0) {
            IVSHMEM_SERVER_DEBUG(server, "cannot create eventfd\n");
            goto fail;
        }
    }

    /* send peer id and shm fd */
    if (ivshmem_server_send_initial_info(server, peer) < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot send initial info\n");
        goto fail;
    }

    /* advertise the new peer to others */
    /* NOTE(review): existing peers are sent peer->vectors[i].wfd while the
     * new peer itself (below) gets event_notifier_get_fd(); with
     * eventfd-backed notifiers these should name the same descriptor --
     * confirm before relying on it. */
    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
        for (i = 0; i < peer->vectors_count; i++) {
            ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id,
                                        peer->vectors[i].wfd);
        }
    }

    /* advertise the other peers to the new one */
    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
        for (i = 0; i < peer->vectors_count; i++) {
            ivshmem_server_send_one_msg(peer->sock_fd, other_peer->id,
                                        other_peer->vectors[i].wfd);
        }
    }

    /* advertise the new peer to itself */
    for (i = 0; i < peer->vectors_count; i++) {
        ivshmem_server_send_one_msg(peer->sock_fd, peer->id,
                                    event_notifier_get_fd(&peer->vectors[i]));
    }

    QTAILQ_INSERT_TAIL(&server->peer_list, peer, next);
    IVSHMEM_SERVER_DEBUG(server, "new peer id = %" PRId64 "\n",
                         peer->id);
    return 0;

fail:
    /* i holds the number of successfully initialized notifiers: unwind
     * exactly those, then drop the socket and the peer structure */
    while (i--) {
        event_notifier_cleanup(&peer->vectors[i]);
    }
    close(newfd);
    g_free(peer);
    return -1;
}
+
+/* Try to ftruncate a file to next power of 2 of shmsize.
+ * If it fails; all power of 2 above shmsize are tested until
+ * we reach the maximum huge page size. This is useful
+ * if the shm file is in a hugetlbfs that cannot be truncated to the
+ * shm_size value. */
+static int
+ivshmem_server_ftruncate(int fd, unsigned shmsize)
+{
+ int ret;
+ struct stat mapstat;
+
+ /* align shmsize to next power of 2 */
+ shmsize = pow2ceil(shmsize);
+
+ if (fstat(fd, &mapstat) != -1 && mapstat.st_size == shmsize) {
+ return 0;
+ }
+
+ while (shmsize <= IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE) {
+ ret = ftruncate(fd, shmsize);
+ if (ret == 0) {
+ return ret;
+ }
+ shmsize *= 2;
+ }
+
+ return -1;
+}
+
/* Init a new ivshmem server */
/*
 * Fill @server from the given parameters.  Pure configuration copy: no
 * file descriptor is opened here (see ivshmem_server_start()).  Both
 * paths are copied with truncation detection; a path that does not fit
 * in PATH_MAX-1 characters is an error.  Returns 0 on success, -1 on
 * path truncation.
 */
int
ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
                    const char *shm_path, bool use_shm_open,
                    size_t shm_size, unsigned n_vectors,
                    bool verbose)
{
    int ret;

    memset(server, 0, sizeof(*server));
    server->verbose = verbose;

    ret = snprintf(server->unix_sock_path, sizeof(server->unix_sock_path),
                   "%s", unix_sock_path);
    if (ret < 0 || ret >= sizeof(server->unix_sock_path)) {
        IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
        return -1;
    }
    ret = snprintf(server->shm_path, sizeof(server->shm_path),
                   "%s", shm_path);
    if (ret < 0 || ret >= sizeof(server->shm_path)) {
        IVSHMEM_SERVER_DEBUG(server, "could not copy shm path\n");
        return -1;
    }

    server->use_shm_open = use_shm_open;
    server->shm_size = shm_size;
    server->n_vectors = n_vectors;

    QTAILQ_INIT(&server->peer_list);

    return 0;
}
+
/* open shm, create and bind to the unix socket */
/*
 * Acquire the server's two resources: the shared-memory fd (either a
 * POSIX shm object, or an immediately-unlinked temporary file under
 * shm_path, depending on use_shm_open) and the listening unix socket.
 * On success both fds are stored in @server; on failure everything
 * opened so far is closed (goto-cleanup chain) and -1 is returned.
 */
int
ivshmem_server_start(IvshmemServer *server)
{
    struct sockaddr_un sun;
    int shm_fd, sock_fd, ret;

    /* open shm file */
    if (server->use_shm_open) {
        IVSHMEM_SERVER_DEBUG(server, "Using POSIX shared memory: %s\n",
                             server->shm_path);
        shm_fd = shm_open(server->shm_path, O_CREAT | O_RDWR, S_IRWXU);
    } else {
        gchar *filename = g_strdup_printf("%s/ivshmem.XXXXXX", server->shm_path);
        IVSHMEM_SERVER_DEBUG(server, "Using file-backed shared memory: %s\n",
                             server->shm_path);
        shm_fd = mkstemp(filename);
        /* unlink right away: the backing file lives only as long as the
         * fd (and the fds passed to clients) stay open */
        unlink(filename);
        g_free(filename);
    }

    if (shm_fd < 0) {
        fprintf(stderr, "cannot open shm file %s: %s\n", server->shm_path,
                strerror(errno));
        return -1;
    }
    if (ivshmem_server_ftruncate(shm_fd, server->shm_size) < 0) {
        fprintf(stderr, "ftruncate(%s) failed: %s\n", server->shm_path,
                strerror(errno));
        goto err_close_shm;
    }

    IVSHMEM_SERVER_DEBUG(server, "create & bind socket %s\n",
                         server->unix_sock_path);

    /* create the unix listening socket */
    sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (sock_fd < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot create socket: %s\n",
                             strerror(errno));
        goto err_close_shm;
    }

    sun.sun_family = AF_UNIX;
    ret = snprintf(sun.sun_path, sizeof(sun.sun_path), "%s",
                   server->unix_sock_path);
    if (ret < 0 || ret >= sizeof(sun.sun_path)) {
        IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
        goto err_close_sock;
    }
    if (bind(sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot connect to %s: %s\n", sun.sun_path,
                             strerror(errno));
        goto err_close_sock;
    }

    if (listen(sock_fd, IVSHMEM_SERVER_LISTEN_BACKLOG) < 0) {
        IVSHMEM_SERVER_DEBUG(server, "listen() failed: %s\n", strerror(errno));
        goto err_close_sock;
    }

    /* publish the fds only once everything succeeded */
    server->sock_fd = sock_fd;
    server->shm_fd = shm_fd;

    return 0;

err_close_sock:
    close(sock_fd);
err_close_shm:
    close(shm_fd);
    return -1;
}
+
+/* close connections to clients, the unix socket and the shm fd */
+void
+ivshmem_server_close(IvshmemServer *server)
+{
+ IvshmemServerPeer *peer, *npeer;
+
+ IVSHMEM_SERVER_DEBUG(server, "close server\n");
+
+ QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, npeer) {
+ ivshmem_server_free_peer(server, peer);
+ }
+
+ unlink(server->unix_sock_path);
+ close(server->sock_fd);
+ close(server->shm_fd);
+ server->sock_fd = -1;
+ server->shm_fd = -1;
+}
+
+/* get the fd_set according to the unix socket and the peer list */
+void
+ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd)
+{
+ IvshmemServerPeer *peer;
+
+ if (server->sock_fd == -1) {
+ return;
+ }
+
+ FD_SET(server->sock_fd, fds);
+ if (server->sock_fd >= *maxfd) {
+ *maxfd = server->sock_fd + 1;
+ }
+
+ QTAILQ_FOREACH(peer, &server->peer_list, next) {
+ FD_SET(peer->sock_fd, fds);
+ if (peer->sock_fd >= *maxfd) {
+ *maxfd = peer->sock_fd + 1;
+ }
+ }
+}
+
/* process incoming messages on the sockets in fd_set */
/*
 * Dispatch the descriptors reported ready in @fds: a readable listening
 * socket means a new client (only fatal when the accept path fails for a
 * reason other than EINTR); a readable peer socket is treated as a
 * disconnection and the peer is freed.  Returns 0 on success, -1 if
 * accepting a new connection failed.
 */
int
ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd)
{
    IvshmemServerPeer *peer, *peer_next;

    if (server->sock_fd < maxfd && FD_ISSET(server->sock_fd, fds) &&
        ivshmem_server_handle_new_conn(server) < 0 && errno != EINTR) {
        IVSHMEM_SERVER_DEBUG(server, "ivshmem_server_handle_new_conn() "
                             "failed\n");
        return -1;
    }

    /* _SAFE variant: ivshmem_server_free_peer() unlinks the current node */
    QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, peer_next) {
        /* any message from a peer socket result in a close() */
        IVSHMEM_SERVER_DEBUG(server, "peer->sock_fd=%d\n", peer->sock_fd);
        if (peer->sock_fd < maxfd && FD_ISSET(peer->sock_fd, fds)) {
            ivshmem_server_free_peer(server, peer);
        }
    }

    return 0;
}
+
+/* lookup peer from its id */
+IvshmemServerPeer *
+ivshmem_server_search_peer(IvshmemServer *server, int64_t peer_id)
+{
+ IvshmemServerPeer *peer;
+
+ QTAILQ_FOREACH(peer, &server->peer_list, next) {
+ if (peer->id == peer_id) {
+ return peer;
+ }
+ }
+ return NULL;
+}
+
+/* dump our info, the list of peers their vectors on stdout */
+void
+ivshmem_server_dump(const IvshmemServer *server)
+{
+ const IvshmemServerPeer *peer;
+ unsigned vector;
+
+ /* dump peers */
+ QTAILQ_FOREACH(peer, &server->peer_list, next) {
+ printf("peer_id = %" PRId64 "\n", peer->id);
+
+ for (vector = 0; vector < peer->vectors_count; vector++) {
+ printf(" vector %d is enabled (fd=%d)\n", vector,
+ event_notifier_get_fd(&peer->vectors[vector]));
+ }
+ }
+}
diff --git a/qemu/contrib/ivshmem-server/ivshmem-server.h b/qemu/contrib/ivshmem-server/ivshmem-server.h
new file mode 100644
index 000000000..385163961
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/ivshmem-server.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef _IVSHMEM_SERVER_H_
+#define _IVSHMEM_SERVER_H_
+
+/**
+ * The ivshmem server is a daemon that creates a unix socket in listen
+ * mode. The ivshmem clients (qemu or ivshmem-client) connect to this
+ * unix socket. For each client, the server will create some eventfd
+ * (see EVENTFD(2)), one per vector. These fd are transmitted to all
+ * clients using the SCM_RIGHTS cmsg message. Therefore, each client is
+ * able to send a notification to another client without being
+ * "proxied" by the server.
+ *
+ * We use this mechanism to send interrupts between guests.
+ * qemu is able to transform an event on an eventfd into a PCI MSI-x
+ * interrupt in the guest.
+ *
+ * The ivshmem server is also able to share the file descriptor
+ * associated to the ivshmem shared memory.
+ */
+
+#include <sys/select.h>
+
+#include "qemu/event_notifier.h"
+#include "qemu/queue.h"
+#include "hw/misc/ivshmem.h"
+
+/**
+ * Maximum number of notification vectors supported by the server
+ */
+#define IVSHMEM_SERVER_MAX_VECTORS 64
+
/**
 * Structure storing a peer
 *
 * Each time a client connects to an ivshmem server, a new
 * IvshmemServerPeer structure is created. This peer and all its
 * vectors are advertised to all connected clients through the connected
 * unix sockets.
 */
typedef struct IvshmemServerPeer {
    QTAILQ_ENTRY(IvshmemServerPeer) next; /**< next in list*/
    int sock_fd; /**< connected unix sock */
    int64_t id; /**< the id of the peer */
    /* fixed-size storage; only the first vectors_count entries are valid */
    EventNotifier vectors[IVSHMEM_SERVER_MAX_VECTORS]; /**< one per vector */
    unsigned vectors_count; /**< number of vectors */
} IvshmemServerPeer;
QTAILQ_HEAD(IvshmemServerPeerList, IvshmemServerPeer);

typedef struct IvshmemServerPeerList IvshmemServerPeerList;
+
/**
 * Structure describing an ivshmem server
 *
 * This structure stores all information related to our server: the name
 * of the server unix socket and the list of connected peers.
 */
typedef struct IvshmemServer {
    char unix_sock_path[PATH_MAX]; /**< path to unix socket */
    int sock_fd; /**< unix sock file descriptor */
    char shm_path[PATH_MAX]; /**< path to shm */
    bool use_shm_open; /**< true: shm_open(shm_path); false: file-backed
                            memory created under the shm_path directory */
    size_t shm_size; /**< size of shm */
    int shm_fd; /**< shm file descriptor */
    unsigned n_vectors; /**< number of vectors */
    uint16_t cur_id; /**< id to be given to next client */
    bool verbose; /**< true in verbose mode */
    IvshmemServerPeerList peer_list; /**< list of peers */
} IvshmemServer;
+
+/**
+ * Initialize an ivshmem server
+ *
+ * @server: A pointer to an uninitialized IvshmemServer structure
+ * @unix_sock_path: The pointer to the unix socket file name
+ * @shm_path: Path to the shared memory. The path corresponds to a POSIX
+ *            shm name or a hugetlbfs mount point.
+ * @use_shm_open: True to open the shared memory with shm_open(3); false to
+ *                create a file-backed region under the @shm_path directory
+ * @shm_size: Size of shared memory
+ * @n_vectors: Number of interrupt vectors per client
+ * @verbose: True to enable verbose mode
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int
+ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
+ const char *shm_path, bool use_shm_open,
+ size_t shm_size, unsigned n_vectors,
+ bool verbose);
+
+/**
+ * Open the shm, then create and bind to the unix socket
+ *
+ * @server: The pointer to the initialized IvshmemServer structure
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_server_start(IvshmemServer *server);
+
+/**
+ * Close the server
+ *
+ * Close connections to all clients, close the unix socket and the
+ * shared memory file descriptor. The structure remains initialized, so
+ * it is possible to call ivshmem_server_start() again after a call to
+ * ivshmem_server_close().
+ *
+ * @server: The ivshmem server
+ */
+void ivshmem_server_close(IvshmemServer *server);
+
+/**
+ * Fill a fd_set with file descriptors to be monitored
+ *
+ * This function will fill a fd_set with all file descriptors that must
+ * be polled (unix server socket and peers unix socket). The function
+ * will not initialize the fd_set, it is up to the caller to do it.
+ *
+ * @server: The ivshmem server
+ * @fds: The fd_set to be updated
+ * @maxfd: Must be set to the max file descriptor + 1 in fd_set. This value is
+ * updated if this function adds a greater fd in fd_set.
+ */
+void
+ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd);
+
+/**
+ * Read and handle new messages
+ *
+ * Given a fd_set (for instance filled by a call to select()), handle
+ * incoming messages from peers.
+ *
+ * @server: The ivshmem server
+ * @fds: The fd_set containing the file descriptors to be checked. Note that
+ * file descriptors that are not related to our server are ignored.
+ * @maxfd: The maximum fd in fd_set, plus one.
+ *
+ * Returns: 0 on success, or a negative value on error
+ */
+int ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd);
+
+/**
+ * Search a peer from its identifier
+ *
+ * @server: The ivshmem server
+ * @peer_id: The identifier of the peer structure
+ *
+ * Returns: The peer structure, or NULL if not found
+ */
+IvshmemServerPeer *
+ivshmem_server_search_peer(IvshmemServer *server, int64_t peer_id);
+
+/**
+ * Dump information of this ivshmem server and its peers on stdout
+ *
+ * @server: The ivshmem server
+ */
+void ivshmem_server_dump(const IvshmemServer *server);
+
+#endif /* _IVSHMEM_SERVER_H_ */
diff --git a/qemu/contrib/ivshmem-server/main.c b/qemu/contrib/ivshmem-server/main.c
new file mode 100644
index 000000000..45776d8af
--- /dev/null
+++ b/qemu/contrib/ivshmem-server/main.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 6WIND S.A., 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+
+#include "ivshmem-server.h"
+
+#define IVSHMEM_SERVER_DEFAULT_VERBOSE 0
+#define IVSHMEM_SERVER_DEFAULT_FOREGROUND 0
+#define IVSHMEM_SERVER_DEFAULT_PID_FILE "/var/run/ivshmem-server.pid"
+#define IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
+#define IVSHMEM_SERVER_DEFAULT_SHM_PATH "ivshmem"
+#define IVSHMEM_SERVER_DEFAULT_SHM_SIZE (4*1024*1024)
+#define IVSHMEM_SERVER_DEFAULT_N_VECTORS 1
+
/* used to quit on signal SIGTERM */
/*
 * Written by the SIGTERM handler and polled by the main loop.  It must be
 * volatile sig_atomic_t: per C11 5.1.2.3 that is the only object type a
 * signal handler may safely write to, and without 'volatile' the compiler
 * may cache the flag in a register and never observe the handler's store.
 */
static volatile sig_atomic_t ivshmem_server_quit;
+
/* arguments given by the user */
typedef struct IvshmemServerArgs {
    bool verbose;                 /* -v: log actions on stdout */
    bool foreground;              /* -F: stay in foreground (no daemon) */
    const char *pid_file;         /* -p: PID file path (daemon mode only) */
    const char *unix_socket_path; /* -S: listening unix socket path */
    const char *shm_path;         /* -M/-m: POSIX shm name or directory */
    bool use_shm_open;            /* true for -M (shm_open), false for -m */
    uint64_t shm_size;            /* -l: shared memory size in bytes */
    unsigned n_vectors;           /* -n: interrupt vectors per client */
} IvshmemServerArgs;
+
/* print the command line synopsis and defaults on stdout (-h) */
static void
ivshmem_server_usage(const char *progname)
{
    printf("Usage: %s [OPTION]...\n"
           " -h: show this help\n"
           " -v: verbose mode\n"
           " -F: foreground mode (default is to daemonize)\n"
           " -p <pid-file>: path to the PID file (used in daemon mode only)\n"
           " default " IVSHMEM_SERVER_DEFAULT_PID_FILE "\n"
           " -S <unix-socket-path>: path to the unix socket to listen to\n"
           " default " IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH "\n"
           " -M <shm-name>: POSIX shared memory object to use\n"
           " default " IVSHMEM_SERVER_DEFAULT_SHM_PATH "\n"
           " -m <dir-name>: where to create shared memory\n"
           " -l <size>: size of shared memory in bytes\n"
           " suffixes K, M and G can be used, e.g. 1K means 1024\n"
           " default %u\n"
           " -n <nvectors>: number of vectors\n"
           " default %u\n",
           progname, IVSHMEM_SERVER_DEFAULT_SHM_SIZE,
           IVSHMEM_SERVER_DEFAULT_N_VECTORS);
}
+
/* point the user at -h on stderr (printed after a bad command line) */
static void
ivshmem_server_help(const char *progname)
{
    fprintf(stderr, "Try '%s -h' for more information.\n", progname);
}
+
/* parse the program arguments, exit on error */
/*
 * Fill @args from argv via getopt().  Prints a diagnostic and exits the
 * process on any invalid option or value; on return @args is fully
 * validated (vector count bounded, verbose only allowed in foreground).
 */
static void
ivshmem_server_parse_args(IvshmemServerArgs *args, int argc, char *argv[])
{
    int c;
    unsigned long long v;
    Error *err = NULL;

    while ((c = getopt(argc, argv, "hvFp:S:m:M:l:n:")) != -1) {

        switch (c) {
        case 'h': /* help */
            ivshmem_server_usage(argv[0]);
            exit(0);
            break;

        case 'v': /* verbose */
            args->verbose = 1;
            break;

        case 'F': /* foreground */
            args->foreground = 1;
            break;

        case 'p': /* pid file */
            args->pid_file = optarg;
            break;

        case 'S': /* unix socket path */
            args->unix_socket_path = optarg;
            break;

        case 'M': /* shm name */
        case 'm': /* dir name */
            /* -M and -m share the path field; the flag records which
             * backing method was requested */
            args->shm_path = optarg;
            args->use_shm_open = c == 'M';
            break;

        case 'l': /* shm size */
            parse_option_size("shm_size", optarg, &args->shm_size, &err);
            if (err) {
                error_report_err(err);
                ivshmem_server_help(argv[0]);
                exit(1);
            }
            break;

        case 'n': /* number of vectors */
            if (parse_uint_full(optarg, &v, 0) < 0) {
                fprintf(stderr, "cannot parse n_vectors\n");
                ivshmem_server_help(argv[0]);
                exit(1);
            }
            args->n_vectors = v;
            break;

        default:
            ivshmem_server_usage(argv[0]);
            exit(1);
            break;
        }
    }

    /* the per-peer vectors array is statically sized */
    if (args->n_vectors > IVSHMEM_SERVER_MAX_VECTORS) {
        fprintf(stderr, "too many requested vectors (max is %d)\n",
                IVSHMEM_SERVER_MAX_VECTORS);
        ivshmem_server_help(argv[0]);
        exit(1);
    }

    /* verbose output goes to stdout, which is useless once detached from
     * the terminal, so the combination with daemon mode is rejected */
    if (args->verbose == 1 && args->foreground == 0) {
        fprintf(stderr, "cannot use verbose in daemon mode\n");
        ivshmem_server_help(argv[0]);
        exit(1);
    }
}
+
/* wait for events on listening server unix socket and connected client
 * sockets */
/*
 * Main event loop: select() on the fds published by
 * ivshmem_server_get_fds() until the SIGTERM flag is raised.  EINTR from
 * select() is retried; any other select() error or a dispatch failure
 * ends the loop.  Returns the last select() result (negative on error).
 */
static int
ivshmem_server_poll_events(IvshmemServer *server)
{
    fd_set fds;
    int ret = 0, maxfd;

    while (!ivshmem_server_quit) {

        /* rebuild the set each round: peers come and go */
        FD_ZERO(&fds);
        maxfd = 0;
        ivshmem_server_get_fds(server, &fds, &maxfd);

        ret = select(maxfd, &fds, NULL, NULL, NULL);

        if (ret < 0) {
            if (errno == EINTR) {
                /* interrupted by a signal (e.g. SIGTERM): re-check the
                 * quit flag and select() again */
                continue;
            }

            fprintf(stderr, "select error: %s\n", strerror(errno));
            break;
        }
        if (ret == 0) {
            continue;
        }

        if (ivshmem_server_handle_fds(server, &fds, maxfd) < 0) {
            fprintf(stderr, "ivshmem_server_handle_fds() failed\n");
            break;
        }
    }

    return ret;
}
+
/* SIGTERM handler: request a clean exit from the poll loop */
static void
ivshmem_server_quit_cb(int signum)
{
    ivshmem_server_quit = 1;
}
+
/*
 * Entry point: parse options, install signal handlers, initialize and
 * start the server, optionally daemonize (writing the PID file after the
 * fork so it records the child's pid), then run the poll loop until
 * SIGTERM.  Returns 0 on clean shutdown, 1 on any setup failure.
 */
int
main(int argc, char *argv[])
{
    IvshmemServer server;
    struct sigaction sa, sa_quit;
    /* documented defaults; overridden by the command line below */
    IvshmemServerArgs args = {
        .verbose = IVSHMEM_SERVER_DEFAULT_VERBOSE,
        .foreground = IVSHMEM_SERVER_DEFAULT_FOREGROUND,
        .pid_file = IVSHMEM_SERVER_DEFAULT_PID_FILE,
        .unix_socket_path = IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH,
        .shm_path = IVSHMEM_SERVER_DEFAULT_SHM_PATH,
        .use_shm_open = true,
        .shm_size = IVSHMEM_SERVER_DEFAULT_SHM_SIZE,
        .n_vectors = IVSHMEM_SERVER_DEFAULT_N_VECTORS,
    };
    int ret = 1;

    /*
     * Do not remove this notice without adding proper error handling!
     * Start with handling ivshmem_server_send_one_msg() failure.
     */
    printf("*** Example code, do not use in production ***\n");

    /* parse arguments, will exit on error */
    ivshmem_server_parse_args(&args, argc, argv);

    /* Ignore SIGPIPE, see this link for more info:
     * http://www.mail-archive.com/libevent-users@monkey.org/msg01606.html */
    /* (a write to a disconnected client must not kill the server) */
    sa.sa_handler = SIG_IGN;
    sa.sa_flags = 0;
    if (sigemptyset(&sa.sa_mask) == -1 ||
        sigaction(SIGPIPE, &sa, 0) == -1) {
        perror("failed to ignore SIGPIPE; sigaction");
        goto err;
    }

    /* SIGTERM raises ivshmem_server_quit so the poll loop ends cleanly */
    sa_quit.sa_handler = ivshmem_server_quit_cb;
    sa_quit.sa_flags = 0;
    if (sigemptyset(&sa_quit.sa_mask) == -1 ||
        sigaction(SIGTERM, &sa_quit, 0) == -1) {
        perror("failed to add SIGTERM handler; sigaction");
        goto err;
    }

    /* init the ivshms structure */
    if (ivshmem_server_init(&server, args.unix_socket_path,
                            args.shm_path, args.use_shm_open,
                            args.shm_size, args.n_vectors, args.verbose) < 0) {
        fprintf(stderr, "cannot init server\n");
        goto err;
    }

    /* start the ivshmem server (open shm & unix socket) */
    if (ivshmem_server_start(&server) < 0) {
        fprintf(stderr, "cannot bind\n");
        goto err;
    }

    /* daemonize if asked to */
    if (!args.foreground) {
        FILE *fp;

        if (qemu_daemon(1, 1) < 0) {
            fprintf(stderr, "cannot daemonize: %s\n", strerror(errno));
            goto err_close;
        }

        /* write pid file */
        /* done after daemonizing so the file holds the daemon's pid */
        fp = fopen(args.pid_file, "w");
        if (fp == NULL) {
            fprintf(stderr, "cannot write pid file: %s\n", strerror(errno));
            goto err_close;
        }

        fprintf(fp, "%d\n", (int) getpid());
        fclose(fp);
    }

    ivshmem_server_poll_events(&server);
    fprintf(stdout, "server disconnected\n");
    ret = 0;

err_close:
    ivshmem_server_close(&server);
err:
    return ret;
}