-rw-r--r--  INFO | 5
-rw-r--r--  docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst | 37
-rw-r--r--  docs/development/ngvsrequirements/ngvs-requirements-document.rst | 266
-rw-r--r--  src/cni/ovsdpdk/Dockerfile | 7
-rw-r--r--  src/cni/ovsdpdk/Vagrantfile | 16
-rwxr-xr-x  src/cni/ovsdpdk/build_cni_ovsdpdk.sh | 19
-rw-r--r--  src/cni/ovsdpdk/install_cni.sh | 8
-rw-r--r--  src/cni/ovsdpdk/kube_ovsdpdk.yml | 71
-rw-r--r--  src/cni/ovsdpdk/ovsdpdk.patch | 136
-rwxr-xr-x  src/cni/ovsdpdk/setup_ovsdpdk.sh | 14
-rwxr-xr-x  src/cni/ovsdpdk/teardown_ovsdpdk.sh | 9
-rw-r--r--  src/fuel-plugin/README.md | 0
-rwxr-xr-x  src/fuel-plugin/deployment_scripts/k8s-master-install.sh | 25
-rwxr-xr-x  src/fuel-plugin/deployment_scripts/k8s-slave-install.sh | 15
-rw-r--r--  src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-master-install.pp | 22
-rw-r--r--  src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-slave-install.pp | 15
-rw-r--r--  src/fuel-plugin/deployment_tasks.yaml | 44
-rw-r--r--  src/fuel-plugin/environment_config.yaml | 19
-rw-r--r--  src/fuel-plugin/metadata.yaml | 30
-rw-r--r--  src/fuel-plugin/node_roles.yaml | 30
-rw-r--r--  src/fuel-plugin/tasks.yaml | 1
-rw-r--r--  src/fuel-plugin/vagrant/Vagrantfile | 21
-rwxr-xr-x  src/fuel-plugin/vagrant/build_fuel_plugin.sh | 8
-rw-r--r--  src/vagrant/k8s_kubeadm/Vagrantfile | 7
-rw-r--r--  src/vagrant/k8s_kubeadm/examples/virtio-user.yaml | 29
-rwxr-xr-x  src/vagrant/k8s_kubeadm/examples/yardstick.sh | 4
-rw-r--r--  src/vagrant/k8s_kubeadm/host_setup.sh | 19
-rw-r--r--  src/vagrant/k8s_kubeadm/master_setup.sh | 23
-rw-r--r--  src/vagrant/k8s_kubeadm/ovsdpdk/Dockerfile | 8
-rw-r--r--  src/vagrant/k8s_kubeadm/ovsdpdk/install.sh | 7
-rw-r--r--  src/vagrant/k8s_kubeadm/ovsdpdk/kube_ovsdpdk.yml | 53
-rw-r--r--  src/vagrant/k8s_kubeadm/ovsdpdk/start.sh | 15
-rw-r--r--  src/vagrant/k8s_kubeadm/worker_setup.sh | 22
-rw-r--r--  src/vnf/ping/Dockerfile | 8
-rwxr-xr-x  src/vnf/ping/start.sh | 6
-rw-r--r--  src/vnf/virtio-user-ping/01-add-single-file.patch | 171
-rw-r--r--  src/vnf/virtio-user-ping/02-fix-nohuge-option.patch | 19
-rw-r--r--  src/vnf/virtio-user-ping/Dockerfile | 12
-rw-r--r--  src/vnf/virtio-user-ping/Vagrantfile | 22
-rwxr-xr-x  src/vnf/virtio-user-ping/build_vpp.sh | 13
-rwxr-xr-x  src/vnf/virtio-user-ping/setup_ovs_virtio.sh | 24
-rwxr-xr-x  src/vnf/virtio-user-ping/setup_virtio_user.sh | 11
-rwxr-xr-x  src/vnf/virtio-user-ping/setup_vpp.sh | 8
-rw-r--r--  src/vnf/virtio-user-ping/startup.conf | 23
-rw-r--r--  src/vnf/virtio-user-ping/virtio-user.patch | 49
45 files changed, 1093 insertions, 278 deletions
diff --git a/INFO b/INFO
index 2f3a086..d074352 100644
--- a/INFO
+++ b/INFO
@@ -20,6 +20,9 @@ wassim.haddad@ericsson.com
heikki.mahkonen@ericsson.com
akapadia@aarnanetworks.com
srupanagunta@gmail.com
+ruijing.guo@gmail.com
Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-12-13-14.59.html
-Link(s) to approval of additional committers: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-04-11-13.59.html
+Link(s) to approval of additional committers:
+http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-04-11-13.59.html
+https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-June/016505.html
diff --git a/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst b/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst
index bff5372..f8fb85a 100644
--- a/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst
+++ b/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst
@@ -14,11 +14,32 @@ analysis with Kubernetes Official Release.
.. table::
:class: longtable
- +-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
- |Use Case / Requirement |Supported in v1.5 |Notes |
- +===========================================================+===================+====================================================================+
- |Manage conainter and virtual machine in the same platform. |No |Kubernetes only manage containers. For this part, we need to setup a|
- | | |platform to manage containers and virtual machine together |
- +-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
- |TBD | | |
- +-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
+ |Use Case / Requirement                                     |Supported in v1.5  |Notes                                                                                                |
+ +===========================================================+===================+=====================================================================================================+
+ |Manage container and virtual machine in the same platform. |No                 | There are several ways Kubernetes could manage VMs:                                                 |
+ |                                                           |                   |                                                                                                     |
+ |                                                           |                   | 1. `Kubevirt <https://github.com/kubevirt/kubevirt>`_                                               |
+ |                                                           |                   | 2. Kubernetes can start rkt and with                                                                |
+ |                                                           |                   |    `rkt it is possible to start VMs <https://coreos.com/rkt/docs/latest/running-kvm-stage1.html>`_  |
+ |                                                           |                   | 3. `Virtlet <https://github.com/Mirantis/virtlet>`_                                                 |
+ |                                                           |                   | 4. `Hypercontainer <https://github.com/kubernetes/frakti>`_                                         |
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
+ |Kubernetes supports multiple networks.                     |No                 | A VNF needs at least three interfaces: management, control plane and data plane. `CNI              |
+ |                                                           |                   | <https://github.com/containernetworking/cni/blob/master/SPEC.md>`_ already supports multiple       |
+ |                                                           |                   | interfaces in the API definition (see the example below).                                          |
+ |                                                           |                   |                                                                                                     |
+ |                                                           |                   | 1. `Multus <https://github.com/Intel-Corp/multus-cni>`_                                            |
+ |                                                           |                   | 2. `CNI-Genie <https://github.com/Huawei-PaaS/CNI-Genie>`_                                         |
+ |                                                           |                   | 3. A `solution built into Kubernetes                                                               |
+ |                                                           |                   |    <https://docs.google.com/document/d/1TW3P4c8auWwYy-w_5afIPDcGNLK3LZf0m14943eVfVg/>`_            |
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
+ |Kubernetes supports NAT-less connections to a container    |No                 | SIP/SDP and SCTP do not work with NAT-ed networks.                                                  |
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
+ |Kubernetes scheduling supports CPU binding, NUMA features  |No                 | The Kubernetes scheduler does not support these features.                                           |
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
+ |DPDK needs to support CNI                                  |No                 | DPDK is the technology to accelerate the data plane. Containers need to                             |
+ |                                                           |                   | support it, just as virtual machines do.                                                            |
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
+ |SR-IOV can support CNI (Optional)                          |No                 | SR-IOV could let containers achieve high performance.                                               |
+ +-----------------------------------------------------------+-------------------+-----------------------------------------------------------------------------------------------------+
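+
+For reference, the sketch below shows how the single-network CNI
+configuration installed by this project's ovsdpdk plugin lands on a node;
+with plain Kubernetes a node uses one such configuration, and a
+meta-plugin such as Multus would be needed to attach several networks to
+one pod (the file path matches this repo's install_cni.sh)::
+
+    cat <<EOF | sudo tee /etc/cni/net.d/10-ovsdpdk.conf
+    {
+        "name": "ovsdpdk",
+        "type": "ovsdpdk",
+        "bridge": "br-dpdk",
+        "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
+    }
+    EOF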
diff --git a/docs/development/ngvsrequirements/ngvs-requirements-document.rst b/docs/development/ngvsrequirements/ngvs-requirements-document.rst
new file mode 100644
index 0000000..212ee81
--- /dev/null
+++ b/docs/development/ngvsrequirements/ngvs-requirements-document.rst
@@ -0,0 +1,266 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License. http://creativecommons.org/licenses/by/4.0
+.. (c) Xuan Jia (China Mobile)
+
+============================================================================
+OpenRetriever Next Gen VIM & Edge Computing Scheduler Requirements Document
+============================================================================
+
+Created by the OPNFV OpenRetriever Team
+
+| Amar Kapadia
+| Wassim Haddad
+| Heikki Mahkonen
+| Srinivasa Addepalli
+
+
+| v1.0 5/3/17
+| v1.1 5/16/17
+| v1.2 7/26/17
+
+Motivation
+----------
+
+The OpenRetriever team believes that existing and new NFV workloads can
+benefit from a new VIM placement and scheduling component. We further
+believe that these same requirements will be very useful for edge
+computing scheduling. This document captures the requirements for this
+effort.
+
+By placement and scheduling, we mean:
+
+- Choose which hardware node to run the VNF on, based on factors such as AAA, ML prediction or MANO
+
+- Start the VNF(s) in response to a trigger, e.g. an incoming request such as DHCP or DNS, a data packet, or a NULL trigger
+
+We use the generic term “scheduler” to refer to the placement and
+scheduling component in the rest of this document. We are not including
+lifecycle management of the VNF in our definition of the scheduler.
+
+At a high level, we believe the VIM scheduler must:
+
+- Support virtual machines, containers and unikernels
+
+- Support legacy and event-driven scheduling
+
+ - By legacy scheduling we mean scheduling without any trigger (see above) i.e. the current technique used by schedulers such as OpenStack Nova.
+  - By event-driven scheduling we mean scheduling with a trigger (see above). We do not mean that the unikernel or container that is going to run the VNF is already running. The instance is started and torn down in response to traffic; this two-step process is transparent to the user.
+  - More specialized higher-level schedulers and orchestration systems may run on top, e.g. FaaS (similar to AWS Lambda).
+
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Serverless vs. FaaS vs. Event-Driven Terminology |
+| |
+| Serverless: By serverless, we mean a general PaaS concept where the user does not have to specify which physical or virtual compute resource their code snippet or function will run on. The code snippet/function is executed in response to an event. |
+| |
+| FaaS: We use this term synonymously with serverless. |
+| |
+| Event-Driven: By event-driven, we mean an entire microservice or service (as opposed to a code snippet) is executed in response to an event. |
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+- Work in distributed edge environments
+
+Please provide your inputs. Once we have a comprehensive list of
+requirements, we will investigate what the right open source solution
+should be, and how to influence that particular project.
+
+Use cases
+---------
+
+A number of NFV use cases can benefit from a new VIM scheduler:
+
+vCPE
+~~~~
+
+vCPE can benefit from a new scheduler in two ways:
+
+1. uCPE devices have very few cores (4-8 typical). Running statically scheduled VMs is inefficient. An event-driven scheduler would help optimize the hardware resources and increase capacity.
+
+2. vCPE is a bursty NFV use case, where services are not “on” all the time. Legacy provisioning of virtual machines for each VNF significantly reduces resource utilization, which in turn negatively impacts the total cost of ownership (TCO). Recent Intel studies have shown that, in certain cases, vCPE saves 30-40% TCO over physical functions. This number is hardly compelling; we believe it needs to be significantly higher to be of any interest. This can be accomplished by increasing utilization, which in turn can be achieved through event-driven scheduling.
+
+IOT/ MEC
+~~~~~~~~
+
+IOT & multi-access edge computing
+(`*MEC* <http://www.etsi.org/technologies-clusters/technologies/multi-access-edge-computing>`__)
+share many of the same characteristics as the uCPE. Though serverless
+functions increase the resource utilization, it does not provide ability
+for application developers to introduce traditional security functions.
+Serverless services that can be brought up on-demand basis provide
+increases resource utilization as well as ability to introduce security
+functions within the service. Additionally, there is need for low
+latency and high security as well. A new scheduler can help with these
+needs.
+
+5G
+~~
+
+5G brings with it a number of the above requirements, but perhaps the
+one that stands out the most is price/performance. By using containers
+and unikernels, the price/performance ratio can be significantly
+improved. (Containers or unikernels provide roughly 10x density with
+legacy scheduling; higher density is possible with event-driven
+scheduling.) 5G will also bring the MEC and IOT needs of the prior use
+case.
+
+Security
+~~~~~~~~
+
+Many traditional services are always-on. Always-on services provide
+enough time for attackers to find vulnerabilities and exploit them.
+Bringing workloads up on demand and terminating them when their work is
+done closes the time advantage attackers have. For example, in a
+three-tier architecture of “Web”, “App” and “DB”, the following
+on-demand bring-up would reduce the attack surface:
+
+- On-demand bring-up of the “DB” service upon an “App” layer request.
+- On-demand bring-up of the “App” service after the “Web” layer authenticates the user.
+- On-demand bring-up of the “Web” service upon a DNS request or upon seeing a “SYN” packet.
+
+Workloads can be brought down upon inactivity or via application-specific
+methods. Thin services (implemented using unikernels and Clear
+Containers) and fast schedulers are required to enable this kind of
+security.
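+
+As an illustration only, the SYN-triggered bring-up could be prototyped
+with standard tools; the interface, port, image name and timeout below
+are assumptions, not part of these requirements::
+
+    # Block until the first TCP SYN arrives on the service port,
+    # then start the "Web" VNF from a locally cached image.
+    sudo tcpdump -i eth0 -c 1 'tcp[tcpflags] & tcp-syn != 0 and dst port 80'
+    sudo docker run -d --name web-vnf web-vnf-image
+    # Simplified inactivity teardown: remove the instance after 10 minutes.
+    sleep 600 && sudo docker rm -f web-vnf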
+
+Detailed Requirements
+---------------------
+
+Multiple compute types
+~~~~~~~~~~~~~~~~~~~~~~
+
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Requirement | Details |
++========================================+=====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+
+| Support for virtual machines | VMs are the most common form of VNFs, and are not going away anytime soon. A scheduler must be able to support VMs. In theory, the MANO software could use two VIMs: one for VMs and another for containers/ unikernels. However, we believe this is a suboptimal solution since the operational complexity doubles - now the ops team has to deal with two VIM software layers. Also, networking coordination between the two VIM layers becomes complex. |
+| | |
+| | NOTE: Bare-metal server scheduling, e.g. OpenStack Ironic, is out-of-scope for this document. |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Support containers | This need is clear: the future of VNFs appears to be containerized. Containers are 10x more dense than VMs and boot 10x faster. Containers will also accelerate the move to cloud-native VNFs. Some users may want nested scheduling, e.g. containers in VMs or containers in containers. Nested scheduling is out-of-scope for this document. We will only focus on a single layer of scheduling and expect any other scheduling layer to be distinct and separate. |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Support unikernels | Unikernels are lightweight VMs with the same density as containers but faster boot times. Since unikernels are VMs with an incredibly small attack surface, they have rock-solid security characteristics. Unikernels also offer higher performance than VMs. For these reasons, unikernels could play an important role in NFV. The downsides of unikernels are that i) they are new, ii) they are often tied to a programming language and iii) they require a software recompile. Unikernels are an ideal fit for micro-VNFs. More specifically: |
+| | |
+| | - Need VNFs to be highly secure by reducing significantly the attack surface |
+| | |
+| | - Need to be able to schedule to NFVI with high performance OVS-less services chaining (e.g. through shared memory) that can significantly improve performance |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Colocation | We need support for affinity/anti-affinity constraints on VNF compute type (i.e. VM, unikernel, container). This will make colocation of different types of VNF compute types on the same host possible, if needed. |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Support all compute types on one SFC | Since VNFs are procured from different vendors, it is possible to get a mix of compute types: VMs, containers, unikernels; and it should be possible to construct a service function chain from heterogeneous compute types. |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Unified API for all compute types | Even though it is theoretically possible to have different APIs for different compute types and push the problem to the MANO layer, this increases the overall complexity for the solution. For this reason, the API needs to be unified and consistent for different compute types. |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Hardware awareness | Ability to place workloads with specific hardware or underlying infrastructure capabilities (e.g. Intel EPA [1]_, FD.io, Smart NICs, Trusted Execution Environment, shared memory switching etc.) |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Rich networking | The new VIM scheduler needs to be supported by rich networking features currently available to OpenStack Nova through OpenStack Neutron (See document outlining K8s `*networking* <https://docs.google.com/document/d/1TW3P4c8auWwYy-w_5afIPDcGNLK3LZf0m14943eVfVg/edit?ts=5901ec88>`__ requirements as an example): |
+| | |
+| | - Ability to create multiple IP addresses per VNF |
+| | |
+| | - Support for networks without cluster-wide connectivity, and for networks that have no visibility to each other |
+| | |
+| | - Multi-tenancy: i) support traffic isolation between compute entities belonging to different tenants, ii) support overlapping IP addresses across VNFs. |
+| | |
+| | - Limit services such as load balancing, service discovery etc. on certain network interfaces (see additional `*document* <https://docs.google.com/document/d/1mNZZ2lL6PERBbt653y_hnck3O4TkQhrlIzW1cIc8dJI/edit>`__). |
+| | |
+| | - L2 and L3 connectivity (?) |
+| | |
+| | - Service Discovery |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Image repository & shared storage | - Centralized/distributed image repository |
+| | |
+| | - Support shared storage (e.g. OpenStack Cinder, K8s volumes etc.) |
++----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+.. [1]
+ Intel EPA includes DPDK, SR-IOV, CPU and NUMA pinning, Huge Pages
+ etc.
+
+[OPEN QUESTION] What subset of the Neutron functionality is required
+here?
+
+Multiple scheduling techniques
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Requirement | Details |
++===========================+====================================================================================================================================================================================================================================================================================================================+
+| Legacy scheduling | This is the current technique used by OpenStack Nova and container orchestration engines. Legacy scheduling needs to be supported as-is. |
++---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Event-driven scheduling | This applies only to unikernels, since unikernels are the only compute type that can boot at packet RTT. Thus, the requirement is to be able to schedule and boot unikernel instances in response to events within <30 ms as a must-have and <10 ms as a nice-to-have. |
++---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Distributed Scheduling | Since services need to be brought up at packet RTT, there could be a requirement to distribute the scheduling across compute nodes. |
++---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Multi Stage scheduling | To enable scheduling of services at packet RTT, there is a need to divide the scheduling into at least two stages: an initial stage, where multiple service images are uploaded to candidate compute nodes, and a second stage, where the distributed scheduler brings up the service using the locally cached images. A sketch follows this table. |
++---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
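+As an illustration of multi-stage scheduling, the two stages could look
+like the sketch below; the node names, registry and use of Docker images
+are assumptions, not part of these requirements::
+
+    # Stage 1 (ahead of time): pre-seed candidate compute nodes with the image.
+    for node in node1 node2 node3; do
+        ssh "$node" sudo docker pull registry.local/vnf-image &
+    done
+    wait
+    # Stage 2 (at packet RTT): the local scheduler starts from the cached image.
+    ssh node2 sudo docker run -d --name vnf registry.local/vnf-image
+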
+[OPEN QUESTION] What subset of the rich scheduler feature-set is
+required here? (e.g. affinity, anti-affinity, understanding of dataplane
+acceleration etc.)
+
+Highly distributed environments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two possibilities here. A) The entire VIM will be in an edge
+device and the MANO software will have to deal with 10s or 100s of
+thousands of VIM instances. B) The alternative is that the VIM itself
+will manage edge devices, i.e. the MANO software will deal with a
+limited number of VIM instances. Both scenarios are captured below.
+
++--------------------+---------------------------------------------------------------------------------------------------------------+
+| Requirement | Details |
++====================+===============================================================================================================+
+| Small footprint | It should be possible to run the VIM scheduler in 1-2 cores. |
++--------------------+---------------------------------------------------------------------------------------------------------------+
+| Nodes across WAN | It should be possible to distribute the VIM scheduler across nodes separated by long RTT delays (i.e. WAN). |
++--------------------+---------------------------------------------------------------------------------------------------------------+
+
+Software Survey Candidates
+--------------------------
+
+Once the requirements are complete, we will evaluate the following
+software stacks against them. Each survey, conducted in person and/or
+via documentation review, will consist of:
+
+1. Architecture overview
+
+2. Pros
+
+3. Cons
+
+4. Gap analysis
+
+5. How gaps can be addressed
+
+Each survey is expected to take 3-4 weeks.
+
++------------------------------------------+------------------------------------------------------+
+| CNCF K8s | Srini (talk to Xuan, Frederic, study gap analysis) |
++------------------------------------------+------------------------------------------------------+
+| Docker Swarm | |
++------------------------------------------+------------------------------------------------------+
+| VMware Photon | Srikanth |
++------------------------------------------+------------------------------------------------------+
+| Intel Clear Container | Srini |
++------------------------------------------+------------------------------------------------------+
+| Intel Ciao | Srini |
++------------------------------------------+------------------------------------------------------+
+| OpenStack Nova | |
++------------------------------------------+------------------------------------------------------+
+| Mesos | Srikanth |
++------------------------------------------+------------------------------------------------------+
+| Virtlet (VM scheduling by K8s) | Amar |
++------------------------------------------+------------------------------------------------------+
+| Kubelet (VM scheduling by K8s) | Amar |
++------------------------------------------+------------------------------------------------------+
+| Kuryr (K8s to Neutron interface) | Prem |
++------------------------------------------+------------------------------------------------------+
+| RunV (like RunC) - can it support a VM | |
++------------------------------------------+------------------------------------------------------+
+| Nelson distributed container framework | |
++------------------------------------------+------------------------------------------------------+
+| Nomad | |
++------------------------------------------+------------------------------------------------------+
+
+Additional Points to Revisit
+----------------------------
+
+- Guidance on how to create immutable infrastructure with complete configuration, and benefits to performance and security
+- Guidance on API - VNFM vs. VIM
+
diff --git a/src/cni/ovsdpdk/Dockerfile b/src/cni/ovsdpdk/Dockerfile
new file mode 100644
index 0000000..2a7208c
--- /dev/null
+++ b/src/cni/ovsdpdk/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:16.04
+
+WORKDIR /cni
+ADD setup_ovsdpdk.sh .
+ADD teardown_ovsdpdk.sh .
+ADD ovsdpdk .
+ADD install_cni.sh .
diff --git a/src/cni/ovsdpdk/Vagrantfile b/src/cni/ovsdpdk/Vagrantfile
new file mode 100644
index 0000000..f170c69
--- /dev/null
+++ b/src/cni/ovsdpdk/Vagrantfile
@@ -0,0 +1,16 @@
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+ config.vm.box = "bento/ubuntu-16.04"
+ config.vm.provider :virtualbox do |vb|
+ vb.customize ["modifyvm", :id, "--memory", 4096]
+ vb.customize ["modifyvm", :id, "--cpus", 4]
+ vb.customize "post-boot",["controlvm", :id, "setlinkstate1", "on"]
+ end
+
+ config.vm.define "cni-ovsdpdk" do |config|
+ config.vm.hostname = "cni-ovsdpdk"
+ config.vm.provision "shell", path: "build_cni_ovsdpdk.sh", privileged: false
+ end
+
+end
diff --git a/src/cni/ovsdpdk/build_cni_ovsdpdk.sh b/src/cni/ovsdpdk/build_cni_ovsdpdk.sh
new file mode 100755
index 0000000..71318d8
--- /dev/null
+++ b/src/cni/ovsdpdk/build_cni_ovsdpdk.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -ex
+
+sudo apt-get update
+sudo apt-get install -y docker.io devscripts git
+wget -qO- https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
+echo 'export GOPATH=/go; export PATH=/usr/local/go/bin:$GOPATH/bin:$PATH' >> ~/.bashrc
+export GOPATH=/go; export PATH=/usr/local/go/bin:$GOPATH/bin:$PATH
+git clone https://github.com/containernetworking/cni
+echo sudo CNI_PATH=$CNI_PATH ./priv-net-run.sh ifconfig
+git clone https://github.com/containernetworking/plugins
+cd plugins
+git checkout 5544d9ced0d6e908fe26e9dbe529c7feb87d21f5
+patch -p1 < /vagrant/ovsdpdk.patch
+./build.sh
+cd bin
+cp /vagrant/* .
+sudo docker build -t openretriever/cni-ovsdpdk .
diff --git a/src/cni/ovsdpdk/install_cni.sh b/src/cni/ovsdpdk/install_cni.sh
new file mode 100644
index 0000000..8f5b78a
--- /dev/null
+++ b/src/cni/ovsdpdk/install_cni.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -ex
+cp /cni/ovsdpdk /opt/cni/bin
+cp /cni/setup_ovsdpdk.sh /opt/cni/bin
+cp /cni/teardown_ovsdpdk.sh /opt/cni/bin
+cp /etc/kube-ovsdpdk/cni-conf.json /etc/cni/net.d/10-ovsdpdk.conf
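+# Sleep forever: keep the pod running so the DaemonSet install container stays healthy.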
+while true; do sleep 3600; done
diff --git a/src/cni/ovsdpdk/kube_ovsdpdk.yml b/src/cni/ovsdpdk/kube_ovsdpdk.yml
new file mode 100644
index 0000000..2bcebdc
--- /dev/null
+++ b/src/cni/ovsdpdk/kube_ovsdpdk.yml
@@ -0,0 +1,71 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ovsdpdk
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: kube-ovsdpdk-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ app: ovsdpdk
+data:
+ cni-conf.json: |
+ {
+ "name": "ovsdpdk",
+ "type": "ovsdpdk",
+ "bridge": "br-dpdk",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.244.0.0/16"
+ }
+ }
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kube-ovsdpdk-ds
+ namespace: kube-system
+ labels:
+ tier: node
+ app: ovsdpdk
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: ovsdpdk
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: ovsdpdk
+ containers:
+ - name: install-cni
+ image: openretriever/cni-ovsdpdk
+ command: [ "/bin/bash", "/cni/install_cni.sh" ]
+ volumeMounts:
+ - name: cni-bin
+ mountPath: /opt/cni/bin
+ - name: cni-cfg
+ mountPath: /etc/cni/net.d
+ - name: ovsdpdk-cfg
+ mountPath: /etc/kube-ovsdpdk
+ volumes:
+ - name: cni-bin
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-cfg
+ hostPath:
+ path: /etc/cni/net.d
+ - name: ovsdpdk-cfg
+ configMap:
+ name: kube-ovsdpdk-cfg
diff --git a/src/cni/ovsdpdk/ovsdpdk.patch b/src/cni/ovsdpdk/ovsdpdk.patch
new file mode 100644
index 0000000..67b3703
--- /dev/null
+++ b/src/cni/ovsdpdk/ovsdpdk.patch
@@ -0,0 +1,136 @@
+diff --git a/build.sh b/build.sh
+index cd21ba8..bc60d91 100755
+--- a/build.sh
++++ b/build.sh
+@@ -19,7 +19,7 @@ export GOPATH=${PWD}/gopath
+ mkdir -p "${PWD}/bin"
+
+ echo "Building plugins"
+-PLUGINS="plugins/meta/* plugins/main/* plugins/ipam/* plugins/sample"
++PLUGINS="plugins/main/ovsdpdk plugins/main/bridge plugins/ipam/host-local"
+ for d in $PLUGINS; do
+ if [ -d "$d" ]; then
+ plugin="$(basename "$d")"
+diff --git a/plugins/main/ovsdpdk/ovsdpdk.go b/plugins/main/ovsdpdk/ovsdpdk.go
+new file mode 100644
+index 0000000..1b931d4
+--- /dev/null
++++ b/plugins/main/ovsdpdk/ovsdpdk.go
+@@ -0,0 +1,117 @@
++// Copyright 2014 CNI authors
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++package main
++
++import (
++ "encoding/json"
++ "errors"
++ "fmt"
++ //"net"
++ "runtime"
++ //"syscall"
++ "os/exec"
++ //"io/ioutil"
++
++ "github.com/containernetworking/cni/pkg/skel"
++ "github.com/containernetworking/cni/pkg/types"
++ "github.com/containernetworking/cni/pkg/types/current"
++ "github.com/containernetworking/cni/pkg/version"
++ //"github.com/containernetworking/plugins/pkg/ip"
++ "github.com/containernetworking/plugins/pkg/ipam"
++ //"github.com/containernetworking/plugins/pkg/ns"
++ //"github.com/containernetworking/plugins/pkg/utils"
++ //"github.com/vishvananda/netlink"
++)
++
++const defaultBrName = "cni0"
++
++type NetConf struct {
++ types.NetConf
++ BrName string `json:"bridge"`
++}
++
++func init() {
++ // this ensures that main runs only on main thread (thread group leader).
++ // since namespace ops (unshare, setns) are done for a single thread, we
++ // must ensure that the goroutine does not jump from OS thread to thread
++ runtime.LockOSThread()
++}
++
++func loadNetConf(bytes []byte) (*NetConf, string, error) {
++ n := &NetConf{
++ BrName: defaultBrName,
++ }
++ if err := json.Unmarshal(bytes, n); err != nil {
++ return nil, "", fmt.Errorf("failed to load netconf: %v", err)
++ }
++ return n, n.CNIVersion, nil
++}
++
++func setupVhostUser(args *skel.CmdArgs, types string) error {
++ exec.Command("/bin/bash", "/opt/cni/bin/setup_ovsdpdk.sh", args.Netns, args.ContainerID, types).Output()
++ return nil
++}
++
++
++func cmdAdd(args *skel.CmdArgs) error {
++ n, cniVersion, err := loadNetConf(args.StdinData)
++ if err != nil {
++ return err
++ }
++
++ // run the IPAM plugin and get back the config to apply
++ r, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
++ if err != nil {
++ return err
++ }
++
++ // Convert whatever the IPAM result was into the current Result type
++ result, err := current.NewResultFromResult(r)
++ if err != nil {
++ return err
++ }
++
++ if len(result.IPs) == 0 {
++ return errors.New("IPAM plugin returned missing IP config")
++ }
++
++ setupVhostUser(args, result.String())
++
++ return types.PrintResult(result, cniVersion)
++}
++
++func tearDownVhostUser(args *skel.CmdArgs) error {
++ exec.Command("/bin/bash", "/opt/cni/bin/teardown_ovsdpdk.sh", args.Netns, args.ContainerID).Output()
++ return nil
++}
++
++func cmdDel(args *skel.CmdArgs) error {
++ n, _, err := loadNetConf(args.StdinData)
++ if err != nil {
++ return err
++ }
++
++ if err := ipam.ExecDel(n.IPAM.Type, args.StdinData); err != nil {
++ return err
++ }
++
++ tearDownVhostUser(args)
++ return err
++
++}
++
++func main() {
++ skel.PluginMain(cmdAdd, cmdDel, version.All)
++}
diff --git a/src/cni/ovsdpdk/setup_ovsdpdk.sh b/src/cni/ovsdpdk/setup_ovsdpdk.sh
new file mode 100755
index 0000000..a1813c9
--- /dev/null
+++ b/src/cni/ovsdpdk/setup_ovsdpdk.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+netns=$1
+containerid=$2
+ip=$3
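+# $netns looks like /proc/<pid>/ns/net; field 3 of the path is the container's PID.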
+pid=$(echo $netns | cut -f3 -d"/")
+
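+# Create the DPDK-backed OVS bridge (netdev datapath) if needed, plus a per-container vhost-user port.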
+sudo ovs-vsctl --may-exist add-br br-dpdk -- set bridge br-dpdk datapath_type=netdev
+sudo ovs-vsctl --may-exist add-port br-dpdk vhost-user-$pid -- set Interface vhost-user-$pid type=dpdkvhostuser
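+# Expose the container's netns to "ip netns", add a placeholder dummy interface
+# inside it, and record the IPAM result under /var/run/cni for the workload.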
+sudo ln -sf $netns /var/run/netns/$pid
+sudo ip link add dummy-$pid type dummy
+sudo ip link set dummy-$pid netns $pid
+sudo mkdir -p /var/run/cni
+echo $ip | sudo tee /var/run/cni/netconf-$pid
diff --git a/src/cni/ovsdpdk/teardown_ovsdpdk.sh b/src/cni/ovsdpdk/teardown_ovsdpdk.sh
new file mode 100755
index 0000000..857738e
--- /dev/null
+++ b/src/cni/ovsdpdk/teardown_ovsdpdk.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+netns=$1
+containerid=$2
+pid=$(echo $netns | cut -f3 -d"/")
+
+sudo ovs-vsctl del-port br-dpdk vhost-user-$pid
+sudo ip netns exec $pid ip link delete dummy-$pid
+sudo rm -rf /var/run/cni/netconf-$pid
diff --git a/src/fuel-plugin/README.md b/src/fuel-plugin/README.md
deleted file mode 100644
index e69de29..0000000
--- a/src/fuel-plugin/README.md
+++ /dev/null
diff --git a/src/fuel-plugin/deployment_scripts/k8s-master-install.sh b/src/fuel-plugin/deployment_scripts/k8s-master-install.sh
deleted file mode 100755
index 6d05ede..0000000
--- a/src/fuel-plugin/deployment_scripts/k8s-master-install.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-set -eux
-
-api_advertise_address=$1
-service_cidr=$2
-pod_network=$3
-pod_network_cidr=$4
-token='8c5adc.1cec8dbf339093f0'
-
-curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
-cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-deb http://apt.kubernetes.io/ kubernetes-xenial main
-EOF
-
-sudo apt-get update
-sudo apt-get install -y docker.io
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
-rm -rf /var/lib/kubelet
-sudo kubeadm init --api-advertise-addresses $api_advertise_address --service-cidr=$service_cidr --pod-network-cidr=$pod_network_cidr --token $token
-
-if [ $pod_network_cidr = 'flannel' ]; then
- sudo kubectl apply -f http://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
-else
- sudo kubectl apply -f http://git.io/weave-kube
-fi
diff --git a/src/fuel-plugin/deployment_scripts/k8s-slave-install.sh b/src/fuel-plugin/deployment_scripts/k8s-slave-install.sh
deleted file mode 100755
index cc7652f..0000000
--- a/src/fuel-plugin/deployment_scripts/k8s-slave-install.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-set -eux
-
-api_advertise_address=$1
-token='8c5adc.1cec8dbf339093f0'
-
-curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
-cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-deb http://apt.kubernetes.io/ kubernetes-xenial main
-EOF
-sudo apt-get update
-sudo apt-get install -y docker.io
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
-rm -rf /var/lib/kubelet
-sudo kubeadm join --token $token $api_advertise_address || true
diff --git a/src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-master-install.pp b/src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-master-install.pp
deleted file mode 100644
index 8c52fad..0000000
--- a/src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-master-install.pp
+++ /dev/null
@@ -1,22 +0,0 @@
-notice('MODULAR: k8s-master-install')
-# get options
-
-$network_metadata = hiera_hash('network_metadata')
-$k8s_nodes_hash = get_nodes_hash_by_roles($network_metadata, ['k8s-master'])
-$k8s_mgmt_ips_hash = get_node_to_ipaddr_map_by_network_role($k8s_nodes_hash, 'management')
-$k8s_mgmt_ips = values($k8s_mgmt_ips_hash)
-
-$network_scheme = hiera_hash('network_scheme')
-$service_cidr = $network_scheme['endpoints']['br-mgmt']['IP']
-
-$k8s_settings = hiera_hash('fuel-plugin-k8s')
-$pod_network = $k8s_settings['pod_network']
-$pod_network_cidr = $k8s_settings['pod_network_cidr']
-
-if $operatingsystem == 'Ubuntu' {
- exec { 'install k8s master':
- command => "/etc/fuel/plugins/fuel-plugin-k8s-1.0/k8s-master-install.sh $k8s_mgmt_ips $service_cidr $pod_network $pod_network_cidr",
- path => '/usr/bin:/usr/sbin:/bin:/sbin',
- }
-} elsif $operatingsystem == 'CentOS' {
-}
diff --git a/src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-slave-install.pp b/src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-slave-install.pp
deleted file mode 100644
index 4e4863b..0000000
--- a/src/fuel-plugin/deployment_scripts/puppet/manifests/k8s-slave-install.pp
+++ /dev/null
@@ -1,15 +0,0 @@
-notice('MODULAR: k8s-slave-install')
-# get options
-
-$network_metadata = hiera_hash('network_metadata')
-$k8s_nodes_hash = get_nodes_hash_by_roles($network_metadata, ['k8s-master'])
-$k8s_mgmt_ips_hash = get_node_to_ipaddr_map_by_network_role($k8s_nodes_hash, 'management')
-$k8s_mgmt_ips = values($k8s_mgmt_ips_hash)
-
-if $operatingsystem == 'Ubuntu' {
- exec { 'install k8s slave':
- command => "/etc/fuel/plugins/fuel-plugin-k8s-1.0/k8s-slave-install.sh $k8s_mgmt_ips",
- path => '/usr/bin:/usr/sbin:/bin:/sbin',
- }
-} elsif $operatingsystem == 'CentOS' {
-}
diff --git a/src/fuel-plugin/deployment_tasks.yaml b/src/fuel-plugin/deployment_tasks.yaml
deleted file mode 100644
index be7ce74..0000000
--- a/src/fuel-plugin/deployment_tasks.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-- id: k8s
- type: group
- role: [k8s-master, k8s-slave]
- requires: [deploy_start]
- required_for: [deploy_end]
- tasks: [hiera, setup_repositories, fuel_pkgs, globals, tools, logging, netconfig]
- parameters:
- strategy:
- type: parallel
-
-- id: k8s-master-install
- type: puppet
- version: 2.0.0
- groups: [k8s-master]
- required_for: [deploy_end]
- requires: [netconfig]
- parameters:
- puppet_manifest: puppet/manifests/k8s-master-install.pp
- puppet_modules: puppet/modules:/etc/puppet/modules
- timeout: 720
-
-- id: k8s-slave-install
- type: puppet
- version: 2.0.0
- groups: [k8s-slave]
- required_for: [post_deployment_end]
- requires: [post_deployment_start]
- parameters:
- puppet_manifest: puppet/manifests/k8s-slave-install.pp
- puppet_modules: puppet/modules:/etc/puppet/modules
- timeout: 720
diff --git a/src/fuel-plugin/environment_config.yaml b/src/fuel-plugin/environment_config.yaml
deleted file mode 100644
index 631b777..0000000
--- a/src/fuel-plugin/environment_config.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-attributes:
- pod_network:
- weight: 21
- type: "select"
- value: "weave"
- label: "K8s POD network"
- values:
- - data: "flannel"
- label: "Flannel"
- - data: "weave"
- label: "Weave"
- description: "K8s POD network."
-
- pod_network_cidr:
- value: '10.244.0.0/16'
- label: 'k8s POD CIDR'
- weight: 70
- type: "text"
- description: 'K8s POD network CIDR.'
diff --git a/src/fuel-plugin/metadata.yaml b/src/fuel-plugin/metadata.yaml
deleted file mode 100644
index e78f7e1..0000000
--- a/src/fuel-plugin/metadata.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Plugin name
-name: fuel-plugin-k8s
-# Human-readable name for your plugin
-title: Install k8s on bare metal
-# Plugin version
-version: '1.0.0'
-# Description
-description: 'This plugin provides to deploy k8s on bare metal'
-# Required fuel version
-fuel_version: ['10.0']
-# Specify license of your plugin
-licenses: ['Apache License Version 2.0']
-# Specify author or company name
-authors: ['ruijing.guo@intel.com']
-# A link to the plugin's page
-homepage: 'https://gerrit.opnfv.org/gerrit/openretriever'
-# Specify a group which your plugin implements, possible options:
-# network, storage, storage::cinder, storage::glance, hypervisor
-groups: ['network']
-is_hotpluggable: false
-# The plugin is compatible with releases in the list
-releases:
- - os: ubuntu
- version: newton-10.0
- mode: ['ha']
- deployment_scripts_path: deployment_scripts/
- repository_path: deployment_scripts/
-
-# Version of plugin package
-package_version: '4.0.0'
diff --git a/src/fuel-plugin/node_roles.yaml b/src/fuel-plugin/node_roles.yaml
deleted file mode 100644
index 1906dc2..0000000
--- a/src/fuel-plugin/node_roles.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-k8s-master:
- name: "k8s-master"
- description: "Install K8s master on nodes with this role"
- has_primary: false # whether has primary role or not
- public_ip_required: false # whether requires public net or not
- weight: 50 # weight that will be used for ordering on fuel ui
- limits:
- min: 0
-
-k8s-slave:
- name: "k8s-slave"
- description: "Install K8s slave on nodes with this role"
- has_primary: false # whether has primary role or not
- public_ip_required: false # whether requires public net or not
- weight: 50 # weight that will be used for ordering on fuel ui
- limits:
- min: 0
diff --git a/src/fuel-plugin/tasks.yaml b/src/fuel-plugin/tasks.yaml
deleted file mode 100644
index fe51488..0000000
--- a/src/fuel-plugin/tasks.yaml
+++ /dev/null
@@ -1 +0,0 @@
-[]
diff --git a/src/fuel-plugin/vagrant/Vagrantfile b/src/fuel-plugin/vagrant/Vagrantfile
deleted file mode 100644
index 8f5e620..0000000
--- a/src/fuel-plugin/vagrant/Vagrantfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-
- config.vm.box = "ubuntu/trusty64"
-
- config.vm.define "fuel" do | h |
- h.vm.host_name = "fuel"
- h.vm.provision :shell, :inline => "/vagrant/build_fuel_plugin.sh", privileged: false
- h.vm.synced_folder "..", "/fuel-plugin"
- h.vm.provider :virtualbox do |v|
- v.customize ["modifyvm", :id, "--memory", 4096]
- v.customize ["modifyvm", :id, "--cpus", 4]
- v.customize "post-boot",["controlvm", :id, "setlinkstate1", "on"]
- end
- end
-end
diff --git a/src/fuel-plugin/vagrant/build_fuel_plugin.sh b/src/fuel-plugin/vagrant/build_fuel_plugin.sh
deleted file mode 100755
index 4cd579f..0000000
--- a/src/fuel-plugin/vagrant/build_fuel_plugin.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-sudo apt-get update -y
-sudo apt-get install -y ruby-dev rubygems-integration python-pip rpm createrepo dpkg-dev
-sudo gem install fpm
-sudo pip install fuel-plugin-builder
-cp -r /fuel-plugin /home/vagrant
-cd /home/vagrant/fuel-plugin; fpb --debug --build .
-cp /home/vagrant/fuel-plugin/*.rpm /vagrant
diff --git a/src/vagrant/k8s_kubeadm/Vagrantfile b/src/vagrant/k8s_kubeadm/Vagrantfile
index 3baf072..3890e57 100644
--- a/src/vagrant/k8s_kubeadm/Vagrantfile
+++ b/src/vagrant/k8s_kubeadm/Vagrantfile
@@ -4,9 +4,14 @@ Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
config.vm.box = "bento/ubuntu-16.04"
+ config.vm.synced_folder "../..", "/src"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", 4096]
vb.customize ["modifyvm", :id, "--cpus", 4]
+ vb.customize ["modifyvm", :id, "--nicpromisc3", "allow-all"]
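+    # DPDK requires SSE4.1/SSE4.2; expose these CPU features to the guest.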
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
vb.customize "post-boot",["controlvm", :id, "setlinkstate1", "on"]
end
@@ -14,6 +19,7 @@ Vagrant.configure("2") do |config|
config.vm.hostname = "master"
config.vm.provision "shell", path: "master_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.1.10"
+ config.vm.network :private_network, ip: "192.168.2.10"
end
(1 .. $num_workers).each do |i|
@@ -21,6 +27,7 @@ Vagrant.configure("2") do |config|
config.vm.hostname = vm_name
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.1.#{i+20}"
+ config.vm.network :private_network, ip: "192.168.2.#{i+20}"
end
end
diff --git a/src/vagrant/k8s_kubeadm/examples/virtio-user.yaml b/src/vagrant/k8s_kubeadm/examples/virtio-user.yaml
new file mode 100644
index 0000000..9ab1e06
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/examples/virtio-user.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: virtiouser
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: virtiouser
+ spec:
+ containers:
+ - name: virtiouser
+ image: openretriever/virtio-user-ping
+ volumeMounts:
+ - mountPath: /dev/hugepages
+ name: hugepage-volume
+ - mountPath: /var/run
+ name: vhost-volume
+ command:
+ - /root/setup_virtio_user.sh
+ volumes:
+ - name: hugepage-volume
+ hostPath:
+ path: /dev/hugepages
+ - name: vhost-volume
+ hostPath:
+ path: /var/run
+ restartPolicy: Always
diff --git a/src/vagrant/k8s_kubeadm/examples/yardstick.sh b/src/vagrant/k8s_kubeadm/examples/yardstick.sh
new file mode 100755
index 0000000..bc1eecf
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/examples/yardstick.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+sudo docker run -tid -v /etc/kubernetes/admin.conf:/etc/yardstick/admin.conf --name yardstick opnfv/yardstick:latest
+sudo docker exec -ti yardstick yardstick task start yardstick/samples/ping_k8s.yaml
diff --git a/src/vagrant/k8s_kubeadm/host_setup.sh b/src/vagrant/k8s_kubeadm/host_setup.sh
new file mode 100644
index 0000000..990df7f
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/host_setup.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.10 master
+192.168.1.21 worker1
+192.168.1.22 worker2
+192.168.1.23 worker3
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y docker.io
+sudo apt-get install -y --allow-downgrades kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
diff --git a/src/vagrant/k8s_kubeadm/master_setup.sh b/src/vagrant/k8s_kubeadm/master_setup.sh
index 31e7901..e98e2bb 100644
--- a/src/vagrant/k8s_kubeadm/master_setup.sh
+++ b/src/vagrant/k8s_kubeadm/master_setup.sh
@@ -1,23 +1,6 @@
-#!/usr/bin/env bash
+#!/bin/bash
-set -e
-HOME=`pwd`
-
-cat << EOF | sudo tee /etc/hosts
-127.0.0.1 localhost
-192.168.1.10 master
-192.168.1.21 worker1
-192.168.1.22 worker2
-192.168.1.23 worker3
-EOF
-
-curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
-cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-deb http://apt.kubernetes.io/ kubernetes-xenial main
-EOF
-sudo apt-get update
-sudo apt-get install -y docker.io
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
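+# Give br-dpdk an address in the pod network (10.244.0.0/16) used by the ovsdpdk CNI plugin.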
+sudo ifconfig br-dpdk 10.244.0.1/16 up
sudo kubeadm init --apiserver-advertise-address 192.168.1.10 --service-cidr=192.168.1.0/24 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
sudo cp /etc/kubernetes/admin.conf $HOME/
@@ -28,3 +11,5 @@ echo "export KUBECONFIG=$HOME/admin.conf" >> $HOME/.bash_profile
kubectl apply -f http://git.io/weave-kube-1.6
#kubectl apply -f http://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#kubectl apply -f http://docs.projectcalico.org/v2.1/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
+#kubectl apply -f /vagrant/k8s_kubeadm/dpdk/kube_ovsdpdk.yml
+#kubectl apply -f /src/cni/ovsdpdk/kube_ovsdpdk.yml
diff --git a/src/vagrant/k8s_kubeadm/ovsdpdk/Dockerfile b/src/vagrant/k8s_kubeadm/ovsdpdk/Dockerfile
new file mode 100644
index 0000000..a63df8f
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/ovsdpdk/Dockerfile
@@ -0,0 +1,8 @@
+FROM ubuntu:16.04
+
+WORKDIR /ovsdpdk
+RUN apt-get update && apt-get install -y sudo
+ADD install.sh .
+RUN bash ./install.sh
+ADD start.sh .
+RUN chmod 755 start.sh
diff --git a/src/vagrant/k8s_kubeadm/ovsdpdk/install.sh b/src/vagrant/k8s_kubeadm/ovsdpdk/install.sh
new file mode 100644
index 0000000..66fb71d
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/ovsdpdk/install.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -ex
+sudo apt-get update
+sudo apt-get install -y openvswitch-switch-dpdk linux-image-extra-4.4.0-75-generic
+sudo update-alternatives --set ovs-vswitchd /usr/lib/openvswitch-switch-dpdk/ovs-vswitchd-dpdk
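+# EAL options for OVS-DPDK: -c 0x1 (core mask), -n 4 (memory channels), -m 1024 (MB of hugepage memory).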
+echo "DPDK_OPTS='--dpdk -c 0x1 -n 4 -m 1024'" | sudo tee -a /etc/default/openvswitch-switch
diff --git a/src/vagrant/k8s_kubeadm/ovsdpdk/kube_ovsdpdk.yml b/src/vagrant/k8s_kubeadm/ovsdpdk/kube_ovsdpdk.yml
new file mode 100644
index 0000000..d79da15
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/ovsdpdk/kube_ovsdpdk.yml
@@ -0,0 +1,53 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ovsdpdk
+ namespace: kube-system
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kube-ovsdpdk-ds
+ namespace: kube-system
+ labels:
+ tier: node
+ app: ovsdpdk
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: ovsdpdk
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
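+      # Tolerate the master taint so the OVS-DPDK pod runs on the master as well as on the workers.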
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: ovsdpdk
+ containers:
+ - name: install-ovsdpdk
+ image: openretriever/ubuntu1604-ovsdpdk
+ command: [ "/bin/bash", "/ovsdpdk/start.sh" ]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: local-bin
+ mountPath: /usr/local/bin
+ - name: var-run
+ mountPath: /var/run/openvswitch
+ - name: dev-hugepage
+ mountPath: /dev
+ volumes:
+ - name: local-bin
+ hostPath:
+ path: /usr/local/bin
+ - name: var-run
+ hostPath:
+ path: /var/run/openvswitch
+ - name: dev-hugepage
+ hostPath:
+ path: /dev
diff --git a/src/vagrant/k8s_kubeadm/ovsdpdk/start.sh b/src/vagrant/k8s_kubeadm/ovsdpdk/start.sh
new file mode 100644
index 0000000..08d8143
--- /dev/null
+++ b/src/vagrant/k8s_kubeadm/ovsdpdk/start.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -ex
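+# Reserve 2048 x 2 MB hugepages (4 GB) and mount hugetlbfs so the DPDK datapath can use them.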
+sudo sysctl -w vm.nr_hugepages=2048
+sudo mount -t hugetlbfs -o pagesize=2M none /dev/hugepages
+cp /usr/bin/ovs-vsctl /usr/local/bin
+sudo service dpdk restart
+sudo service openvswitch-switch restart
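+# datapath_type=netdev selects OVS's userspace (DPDK) datapath instead of the kernel datapath.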
+sudo ovs-vsctl add-br br-dpdk -- set bridge br-dpdk datapath_type=netdev
+sudo modprobe uio_pci_generic
+#sudo ip address flush enp0s9
+#sudo /usr/share/dpdk/tools/dpdk_nic_bind.py --bind=uio_pci_generic enp0s9
+#sudo ovs-vsctl add-port br-dpdk dpdk0 -- set Interface dpdk0 type=dpdk
+while true; do sleep 3600; done
+# Manual equivalent, for running this image by hand instead of through the DaemonSet:
+#   sudo docker run -ti --privileged -v /dev:/dev -v /usr/local/bin:/usr/local/bin -v /var/run/openvswitch/:/var/run/openvswitch/ dpdk /ovsdpdk/start.sh
diff --git a/src/vagrant/k8s_kubeadm/worker_setup.sh b/src/vagrant/k8s_kubeadm/worker_setup.sh
index d04cae1..b68d800 100644
--- a/src/vagrant/k8s_kubeadm/worker_setup.sh
+++ b/src/vagrant/k8s_kubeadm/worker_setup.sh
@@ -1,22 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
-set -e
-HOME=`pwd`
-
-cat << EOF | sudo tee /etc/hosts
-127.0.0.1 localhost
-192.168.1.10 master
-192.168.1.21 worker1
-192.168.1.22 worker2
-192.168.1.23 worker3
-EOF
-
-curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
-cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-deb http://apt.kubernetes.io/ kubernetes-xenial main
-EOF
-sudo apt-get update
-sudo apt-get install -y docker.io
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
+set -ex
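+# '|| true' below keeps re-provisioning idempotent when this worker has already joined the cluster.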
sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
-echo "vagrant ssh master -c '/vagrant/examples/nginx-app.sh'"
diff --git a/src/vnf/ping/Dockerfile b/src/vnf/ping/Dockerfile
new file mode 100644
index 0000000..9143623
--- /dev/null
+++ b/src/vnf/ping/Dockerfile
@@ -0,0 +1,8 @@
+FROM ubuntu:16.04
+LABEL maintainer="OPNFV OpenRetriever"
+
+EXPOSE 22
+RUN apt-get update -y
+RUN apt-get install -y sudo openssh-server inetutils-ping
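+# sshd plus inetutils-ping is all this sample VNF needs; presumably the ping traffic is driven over ssh (e.g. by the yardstick ping sample).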
+COPY start.sh /usr/local/bin
+RUN chmod 755 /usr/local/bin/start.sh
diff --git a/src/vnf/ping/start.sh b/src/vnf/ping/start.sh
new file mode 100755
index 0000000..2596a65
--- /dev/null
+++ b/src/vnf/ping/start.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# ~/.ssh must stay traversable (700); only the key file itself is tightened to 600.
+chmod 700 ~/.ssh
+chmod 600 ~/.ssh/authorized_keys
+sudo service ssh restart
+while true ; do sleep 10000; done
diff --git a/src/vnf/virtio-user-ping/01-add-single-file.patch b/src/vnf/virtio-user-ping/01-add-single-file.patch
new file mode 100644
index 0000000..a686502
--- /dev/null
+++ b/src/vnf/virtio-user-ping/01-add-single-file.patch
@@ -0,0 +1,171 @@
+Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
+---
+ lib/librte_eal/common/eal_common_options.c | 18 +++++++++++
+ lib/librte_eal/common/eal_internal_cfg.h | 1 +
+ lib/librte_eal/common/eal_options.h | 2 ++
+ lib/librte_eal/linuxapp/eal/eal.c | 4 +--
+ lib/librte_eal/linuxapp/eal/eal_memory.c | 49 +++++++++++++++++++++++++-----
+ 5 files changed, 64 insertions(+), 10 deletions(-)
+
+diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
+index f470195..4ad41b3 100644
+--- a/lib/librte_eal/common/eal_common_options.c
++++ b/lib/librte_eal/common/eal_common_options.c
+@@ -95,6 +95,7 @@ eal_long_options[] = {
+ {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM },
+ {OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM },
+ {OPT_XEN_DOM0, 0, NULL, OPT_XEN_DOM0_NUM },
++ {OPT_SINGLE_FILE, 0, NULL, OPT_SINGLE_FILE_NUM },
+ {0, 0, NULL, 0 }
+ };
+
+@@ -933,6 +934,10 @@ eal_parse_common_option(int opt, const char *optarg,
+ core_parsed = 1;
+ break;
+
++ case OPT_SINGLE_FILE_NUM:
++ conf->single_file = 1;
++ break;
++
+ /* don't know what to do, leave this to caller */
+ default:
+ return 1;
+@@ -1025,6 +1030,17 @@ eal_check_common_options(struct internal_config *internal_cfg)
+ return -1;
+ }
+
++ if (internal_cfg->single_file && internal_cfg->force_sockets == 1) {
++ RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE" cannot "
++ "be specified together with --"OPT_SOCKET_MEM"\n");
++ return -1;
++ }
++ if (internal_cfg->single_file && internal_cfg->hugepage_unlink) {
++ RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot "
++ "be specified together with --"OPT_SINGLE_FILE"\n");
++ return -1;
++ }
++
+ if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 &&
+ rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) {
+ RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
+@@ -1056,6 +1072,8 @@ eal_common_usage(void)
+ " -n CHANNELS Number of memory channels\n"
+ " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
+ " -r RANKS Force number of memory ranks (don't detect)\n"
++ " --"OPT_SINGLE_FILE" Create single file for shared memory, and \n"
++ " do not promise physical contiguity of memseg\n"
+ " -b, --"OPT_PCI_BLACKLIST" Add a PCI device in black list.\n"
+ " Prevent EAL from using this PCI device. The argument\n"
+ " format is <domain:bus:devid.func>.\n"
+diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
+index 7b7e8c8..82b0f97 100644
+--- a/lib/librte_eal/common/eal_internal_cfg.h
++++ b/lib/librte_eal/common/eal_internal_cfg.h
+@@ -61,6 +61,7 @@ struct hugepage_info {
+ */
+ struct internal_config {
+ volatile size_t memory; /**< amount of asked memory */
++ volatile unsigned single_file; /**< map all hugepages in single file */
+ volatile unsigned force_nchannel; /**< force number of channels */
+ volatile unsigned force_nrank; /**< force number of ranks */
+ volatile unsigned no_hugetlbfs; /**< true to disable hugetlbfs */
+diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
+index a881c62..e5da14a 100644
+--- a/lib/librte_eal/common/eal_options.h
++++ b/lib/librte_eal/common/eal_options.h
+@@ -83,6 +83,8 @@ enum {
+ OPT_VMWARE_TSC_MAP_NUM,
+ #define OPT_XEN_DOM0 "xen-dom0"
+ OPT_XEN_DOM0_NUM,
++#define OPT_SINGLE_FILE "single-file"
++ OPT_SINGLE_FILE_NUM,
+ OPT_LONG_MAX_NUM
+ };
+
+diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
+index 7c78f2d..b6f2ca2 100644
+--- a/lib/librte_eal/linuxapp/eal/eal.c
++++ b/lib/librte_eal/linuxapp/eal/eal.c
+@@ -839,6 +839,8 @@ rte_eal_init(int argc, char **argv)
+ }
+ #endif
+
++ eal_thread_init_master(rte_config.master_lcore);
++
+ if (rte_eal_memory_init() < 0) {
+ rte_eal_init_alert("Cannot init memory\n");
+ rte_errno = ENOMEM;
+@@ -877,8 +879,6 @@ rte_eal_init(int argc, char **argv)
+ if (eal_plugins_init() < 0)
+ rte_eal_init_alert("Cannot init plugins\n");
+
+- eal_thread_init_master(rte_config.master_lcore);
+-
+ ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+
+ RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
+diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
+index 618a09b..70c6536 100644
+--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
++++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
+@@ -982,20 +982,53 @@ rte_eal_hugepage_init(void)
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+- /* hugetlbfs can be disabled */
+- if (internal_config.no_hugetlbfs) {
+- addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
+- MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
++ /* when hugetlbfs is disabled or single-file option is specified */
++ if (internal_config.no_hugetlbfs || internal_config.single_file) {
++ int fd;
++ uint64_t pagesize;
++ unsigned socket_id = rte_socket_id();
++ char filepath[MAX_HUGEPAGE_PATH];
++
++ if (internal_config.no_hugetlbfs) {
++ eal_get_hugefile_path(filepath, sizeof(filepath),
++ "/dev/shm", 0);
++ pagesize = RTE_PGSIZE_4K;
++ } else {
++ struct hugepage_info *hpi;
++
++ hpi = &internal_config.hugepage_info[0];
++ eal_get_hugefile_path(filepath, sizeof(filepath),
++ hpi->hugedir, 0);
++ pagesize = hpi->hugepage_sz;
++ }
++ fd = open(filepath, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
++ if (fd < 0) {
++ RTE_LOG(ERR, EAL, "%s: open %s failed: %s\n",
++ __func__, filepath, strerror(errno));
++ return -1;
++ }
++
++ if (ftruncate(fd, internal_config.memory) < 0) {
++ RTE_LOG(ERR, EAL, "ftruncate %s failed: %s\n",
++ filepath, strerror(errno));
++ return -1;
++ }
++
++ addr = mmap(NULL, internal_config.memory,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED | MAP_POPULATE, fd, 0);
+ if (addr == MAP_FAILED) {
+- RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
+- strerror(errno));
++ RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n",
++ __func__, strerror(errno));
+ return -1;
+ }
+ mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+ mcfg->memseg[0].addr = addr;
+- mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
++ mcfg->memseg[0].hugepage_sz = pagesize;
+ mcfg->memseg[0].len = internal_config.memory;
+- mcfg->memseg[0].socket_id = 0;
++ mcfg->memseg[0].socket_id = socket_id;
++
++ close(fd);
+ return 0;
+ }
+
diff --git a/src/vnf/virtio-user-ping/02-fix-nohuge-option.patch b/src/vnf/virtio-user-ping/02-fix-nohuge-option.patch
new file mode 100644
index 0000000..3243eaf
--- /dev/null
+++ b/src/vnf/virtio-user-ping/02-fix-nohuge-option.patch
@@ -0,0 +1,19 @@
+Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
+---
+ lib/librte_eal/bsdapp/eal/eal.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
+index 05f0c1f..b62ef69 100644
+--- a/lib/librte_eal/bsdapp/eal/eal.c
++++ b/lib/librte_eal/bsdapp/eal/eal.c
+@@ -546,9 +546,7 @@ rte_eal_init(int argc, char **argv)
+ }
+
+ if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
+- if (internal_config.no_hugetlbfs)
+- internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+- else
++ if (!internal_config.no_hugetlbfs)
+ internal_config.memory = eal_get_hugepage_mem_size();
+ }
+
diff --git a/src/vnf/virtio-user-ping/Dockerfile b/src/vnf/virtio-user-ping/Dockerfile
new file mode 100644
index 0000000..8b09548
--- /dev/null
+++ b/src/vnf/virtio-user-ping/Dockerfile
@@ -0,0 +1,12 @@
+FROM ubuntu:16.04
+
+RUN apt-get update
+RUN apt-get -y install sudo apt-transport-https devscripts git wget vim net-tools
+ADD 01-add-single-file.patch /root
+ADD 02-fix-nohuge-option.patch /root
+ADD virtio-user.patch /root
+ADD build_vpp.sh /root
+ADD setup_vpp.sh /root
+ADD startup.conf /root
+ADD setup_virtio_user.sh /root
+RUN /root/build_vpp.sh
diff --git a/src/vnf/virtio-user-ping/Vagrantfile b/src/vnf/virtio-user-ping/Vagrantfile
new file mode 100644
index 0000000..3f5a477
--- /dev/null
+++ b/src/vnf/virtio-user-ping/Vagrantfile
@@ -0,0 +1,22 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+
+ config.vm.box = "bento/ubuntu-16.04"
+
+ config.vm.define "ovs-virtio" do | h |
+ config.vm.host_name = "ovs-virtio"
+ config.vm.provision "shell", path: "setup_ovs_virtio.sh", privileged: false
+ config.vm.provider :virtualbox do |v|
+ v.customize ["modifyvm", :id, "--memory", 8192]
+ v.customize ["modifyvm", :id, "--cpus", 4]
+ v.customize "post-boot",["controlvm", :id, "setlinkstate1", "on"]
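+    # Expose SSE4.1/SSE4.2 to the guest; the VPP/DPDK build used later expects these CPU features.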
+ v.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ v.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+ end
+ end
+end
diff --git a/src/vnf/virtio-user-ping/build_vpp.sh b/src/vnf/virtio-user-ping/build_vpp.sh
new file mode 100755
index 0000000..9404bb1
--- /dev/null
+++ b/src/vnf/virtio-user-ping/build_vpp.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+sudo apt-get update
+sudo apt-get -y install sudo apt-transport-https devscripts git wget vim net-tools
+cd /root
+git clone https://gerrit.fd.io/r/vpp
+cd vpp
+git checkout stable/1707
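+# Stage the EAL patches in VPP's bundled DPDK 17.05 patch directory so the VPP build applies them, then patch VPP itself for virtio-user.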
+cp ../01-add-single-file.patch dpdk/dpdk-17.05_patches
+cp ../02-fix-nohuge-option.patch dpdk/dpdk-17.05_patches
+patch -p1 < ../virtio-user.patch
+make UNATTENDED=yes install-dep || true
+make bootstrap
+make build; find . -type f | grep "install.*bin" | xargs -I {} cp {} /usr/bin/
diff --git a/src/vnf/virtio-user-ping/setup_ovs_virtio.sh b/src/vnf/virtio-user-ping/setup_ovs_virtio.sh
new file mode 100755
index 0000000..a565589
--- /dev/null
+++ b/src/vnf/virtio-user-ping/setup_ovs_virtio.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+sudo sysctl -w vm.nr_hugepages=1024
+sudo mount -t hugetlbfs -o pagesize=2M none /dev/hugepages
+
+sudo apt-get update -y
+sudo apt-get install -y openvswitch-switch-dpdk
+sudo update-alternatives --set ovs-vswitchd /usr/lib/openvswitch-switch-dpdk/ovs-vswitchd-dpdk
+
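+# --vhost-owner/--vhost-perm appear to be Ubuntu's openvswitch-switch-dpdk extensions; they relax ownership of the vhost-user sockets so the container can open them.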
+echo "DPDK_OPTS='--dpdk -c 0x1 -n 4 -m 1024 --vhost-owner docker --vhost-perm 0664'" | sudo tee -a /etc/default/openvswitch-switch
+sudo service dpdk restart
+sudo service openvswitch-switch restart
+sleep 10
+
+sudo ovs-vsctl add-br br-dpdk -- set bridge br-dpdk datapath_type=netdev
+sudo ovs-vsctl add-port br-dpdk vhost-user-1 -- set Interface vhost-user-1 type=dpdkvhostuser
+sudo ifconfig br-dpdk 192.168.3.1/24 up
+
+sudo sysctl -w vm.nr_hugepages=2048
+sudo apt-get install -y docker.io
+sudo docker build -t vpp /vagrant/
+sudo docker run -itd -v /dev/hugepages/:/dev/hugepages/ -v /var/run/openvswitch:/var/run/openvswitch -v /vagrant:/vagrant vpp /root/setup_vpp.sh
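+# Give VPP in the container time to bring up 192.168.3.2 (see setup_vpp.sh), then ping it across the vhost-user port.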
+sleep 20
+ping -c4 192.168.3.2
diff --git a/src/vnf/virtio-user-ping/setup_virtio_user.sh b/src/vnf/virtio-user-ping/setup_virtio_user.sh
new file mode 100755
index 0000000..03e987f
--- /dev/null
+++ b/src/vnf/virtio-user-ping/setup_virtio_user.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
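+# Recover this pod's id from its dummy-<id> interface, presumably created by the ovsdpdk CNI plugin.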
+id=$(ip a | grep dummy | cut -f2 -d"-" | cut -f1 -d":")
+sed -i "s/vhost-user-1/vhost-user-$id/" /root/startup.conf
+mkdir -p /run/vpp
+vpp -c /root/startup.conf &
+sleep 10
+chmod 777 /run/vpp/cli.sock
+vppctl set int state VirtioUser0/0/0 up
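+# Pull the pod IP out of the CNI plugin's netconf-<id> record; the cut field positions assume that file's exact layout.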
+ip=$(cut -f6 -d":" /var/run/cni/netconf-$id | cut -f1 -d" ")
+vppctl set int ip add VirtioUser0/0/0 $ip/24
+sleep 1000000
diff --git a/src/vnf/virtio-user-ping/setup_vpp.sh b/src/vnf/virtio-user-ping/setup_vpp.sh
new file mode 100755
index 0000000..eef672c
--- /dev/null
+++ b/src/vnf/virtio-user-ping/setup_vpp.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
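+# Start VPP with the PoC startup.conf, then give the virtio-user interface the container side (192.168.3.2) of the link.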
+mkdir -p /run/vpp
+vpp -c /root/startup.conf &
+sleep 10
+chmod 777 /run/vpp/cli.sock
+vppctl set int state VirtioUser0/0/0 up
+vppctl set int ip add VirtioUser0/0/0 192.168.3.2/24
+sleep 1000000
diff --git a/src/vnf/virtio-user-ping/startup.conf b/src/vnf/virtio-user-ping/startup.conf
new file mode 100644
index 0000000..ce8badc
--- /dev/null
+++ b/src/vnf/virtio-user-ping/startup.conf
@@ -0,0 +1,23 @@
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen /run/vpp/cli.sock
+}
+
+api-trace {
+ on
+}
+
+cpu {
+}
+
+plugins
+{
+ path /root/vpp/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins
+}
+
+dpdk {
+ huge-dir /dev/hugepages
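+  # 'virtio-user' is not a stock VPP option; it is added by virtio-user.patch for this PoC.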
+ virtio-user /var/run/openvswitch/vhost-user-1
+}
diff --git a/src/vnf/virtio-user-ping/virtio-user.patch b/src/vnf/virtio-user-ping/virtio-user.patch
new file mode 100644
index 0000000..2504785
--- /dev/null
+++ b/src/vnf/virtio-user-ping/virtio-user.patch
@@ -0,0 +1,49 @@
+diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c
+index 68c55f3..1acafcc 100755
+--- a/src/plugins/dpdk/device/init.c
++++ b/src/plugins/dpdk/device/init.c
+@@ -827,6 +827,7 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input)
+ u8 huge_dir = 0;
+ u8 file_prefix = 0;
+ u8 *socket_mem = 0;
++ u8 *virtio_user = 0;
+
+ conf->device_config_index_by_pci_addr = hash_create (0, sizeof (uword));
+ log_level = RTE_LOG_NOTICE;
+@@ -852,6 +853,17 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input)
+ else if (unformat (input, "no-multi-seg"))
+ conf->no_multi_seg = 1;
+
++
++ /* hardcode here for quick poc */
++ else if (unformat (input, "virtio-user %s", &virtio_user))
++ {
++ vec_add1 (conf->eal_init_args, (u8*)"-m 1024");
++ vec_add1 (conf->eal_init_args, (u8*)"--no-pci");
++ vec_add1 (conf->eal_init_args, (u8*)"--single-file");
++ tmp = format (0, "--vdev=virtio_user0,path=%s", virtio_user);
++ vec_add1 (conf->eal_init_args, (u8*)tmp);
++ }
++
+ else if (unformat (input, "dev default %U", unformat_vlib_cli_sub_input,
+ &sub_input))
+ {
+@@ -1168,18 +1180,6 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input)
+
+ #undef _
+
+- /* set master-lcore */
+- tmp = format (0, "--master-lcore%c", 0);
+- vec_add1 (conf->eal_init_args, tmp);
+- tmp = format (0, "%u%c", tm->main_lcore, 0);
+- vec_add1 (conf->eal_init_args, tmp);
+-
+- /* set socket-mem */
+- tmp = format (0, "--socket-mem%c", 0);
+- vec_add1 (conf->eal_init_args, tmp);
+- tmp = format (0, "%s%c", socket_mem, 0);
+- vec_add1 (conf->eal_init_args, tmp);
+-
+ /* NULL terminate the "argv" vector, in case of stupidity */
+ vec_add1 (conf->eal_init_args, 0);
+ _vec_len (conf->eal_init_args) -= 1;