aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--core/traffic_controller.py2
-rw-r--r--pods/papi/k8scmdrun.py112
-rw-r--r--pods/papi/papi.py51
-rw-r--r--testcases/testcase.py9
-rw-r--r--tools/k8s/cluster-deployment/uscni/Dockerfile25
-rwxr-xr-xtools/k8s/cluster-deployment/uscni/entrypoint.sh18
-rw-r--r--tools/k8s/cluster-deployment/uscni/userspace-cni-daemonset.yml45
-rw-r--r--tools/pkt_gen/trex/trex_client.py86
-rwxr-xr-xvsperf7
-rw-r--r--vswitches/ovs.py36
10 files changed, 366 insertions, 25 deletions
diff --git a/core/traffic_controller.py b/core/traffic_controller.py
index 1f21e57d..2a5b0350 100644
--- a/core/traffic_controller.py
+++ b/core/traffic_controller.py
@@ -164,6 +164,8 @@ class TrafficController(object):
for(key, value) in list(item.items()):
logging.info(" Key: " + str(key) +
", Value: " + str(value))
+ if settings.getValue('CLEAN_OUTPUT'):
+ print(str(key) + ", " + str(value))
def get_results(self):
"""IResult interface implementation.
diff --git a/pods/papi/k8scmdrun.py b/pods/papi/k8scmdrun.py
new file mode 100644
index 00000000..2fb1a62e
--- /dev/null
+++ b/pods/papi/k8scmdrun.py
@@ -0,0 +1,112 @@
+# Copyright 2021 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Run commands inside the pod for post-deployment configuration
+"""
+
+import re
+import os
+from kubernetes import client, config
+from kubernetes.client.rest import ApiException
+from kubernetes.stream import stream
+
+def execute_command(api_instance, pod_info, exec_command):
+ """
+ Execute a command inside a specified pod
+ exec_command = list of strings
+ """
+ name = pod_info['name']
+ resp = None
+ try:
+ resp = api_instance.read_namespaced_pod(name=name,
+ namespace='default')
+ except ApiException as e:
+ if e.status != 404:
+ print("Unknown error: %s" % e)
+ exit(1)
+ if not resp:
+ print("Pod %s does not exist. Creating it..." % name)
+ return -1
+
+ # Calling exec and waiting for response
+ resp = stream(api_instance.connect_get_namespaced_pod_exec,
+ name,
+ 'default',
+ command=exec_command,
+ stderr=True, stdin=False,
+ stdout=True, tty=False)
+ print("Response: " + resp)
+ return resp
+
+def get_virtual_sockets(api_instance, podname, namespace):
+ """
+ Memif or VhostUser Sockets
+ """
+ socket_files = []
+ pinfo = {'name': podname,
+ 'pod_ip':'',
+ 'namespace': namespace}
+ cmd = ['cat', '/etc/podnetinfo/annotations']
+ resp = execute_command(api_instance, pinfo, cmd)
+ # Remove unnecessary elements
+ results = re.sub(r"(\\n|\"|\n|\\|\]|\{|\}|\[)", "", resp).strip()
+ # Remove unnecessary spaces
+ results = re.sub(r"\s+","", results, flags=re.UNICODE)
+ # Get the RHS values
+ output = results.split('=')
+ for out in output:
+ if 'socketfile' in out:
+ out2 = out.split(',')
+ for rout in out2:
+ if 'socketfile' in rout:
+ print(rout[11:])
+ socket_files.append(rout[11:])
+
+
+def get_sriov_interfaces(api_instance, podname, namespace):
+ """
+ Get SRIOV PIDs.
+ """
+ pinfo = {'name': podname,
+ 'pod_ip':'',
+ 'namespace': namespace}
+ cmd = ['cat', '/etc/podnetinfo/annotations']
+ response = execute_command(api_instance, pinfo, cmd)
+ # Remove unnecessary elements
+ results = re.sub(r"(\\n|\"|\n|\\|\]|\{|\}|\[)", "", response).strip()
+ # Remove unnecessary spaces
+ results = re.sub(r"\s+","", results, flags=re.UNICODE)
+ # Get the RHS values
+ output = results.split('=')
+ names = []
+ ifs = []
+ for out in output:
+ if 'interface' in out:
+ out2 = out.split(',')
+ for rout in out2:
+ if 'interface' in rout:
+ ifs.append(rout[10:])
+ elif 'name' in rout:
+ names.append(rout[5:])
+ res = {names[i]: ifs[i] for i in range(len(names))}
+
+def start_fowarding_app(api_instance, podname, namespace, appname):
+ """
+ Start the Forwarding Application
+ """
+ pinfo = {'name': podname,
+ 'pod_ip':'',
+ 'namespace': namespace}
+ cmd = [appname, '&']
+ response = execute_command(api_instance, pinfo, cmd)
diff --git a/pods/papi/papi.py b/pods/papi/papi.py
index 5c15fa04..67cc3bc5 100644
--- a/pods/papi/papi.py
+++ b/pods/papi/papi.py
@@ -1,4 +1,4 @@
-# Copyright 2020 University Of Delhi.
+# Copyright 2021 University Of Delhi, Spirent Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -55,8 +55,11 @@ class Papi(IPod):
namespace = 'default'
pod_manifests = S.getValue('POD_MANIFEST_FILEPATH')
pod_count = int(S.getValue('POD_COUNT'))
- #namespace = 'vswitchperf'
- # replace_namespace(api, namespace)
+ if S.hasValue('POD_NAMESPACE'):
+ namespace = S.getValue('POD_NAMESPACE')
+ else:
+ namespace = 'default'
+ dep_pod_list = []
# sriov configmap
if S.getValue('PLUGIN') == 'sriov':
@@ -70,8 +73,7 @@ class Papi(IPod):
group = 'k8s.cni.cncf.io'
version = 'v1'
kind_plural = 'network-attachment-definitions'
- api = client.CustomObjectsApi()
-
+ api = client.CustomObjectsApi()
assert pod_count <= len(pod_manifests)
for nad_filepath in S.getValue('NETWORK_ATTACHMENT_FILEPATH'):
@@ -87,10 +89,10 @@ class Papi(IPod):
#create pod workloads
api = client.CoreV1Api()
-
for count in range(pod_count):
+ dep_pod_info = {}
pod_manifest = load_manifest(pod_manifests[count])
-
+ dep_pod_info['name'] = pod_manifest["metadata"]["name"]
try:
response = api.create_namespaced_pod(namespace, pod_manifest)
self._logger.info(str(response))
@@ -98,7 +100,40 @@ class Papi(IPod):
except ApiException as err:
raise Exception from err
- time.sleep(12)
+ # Wait for the pod to start
+ time.sleep(5)
+ status = "Unknown"
+ count = 0
+ while True:
+ if count == 10:
+ break
+ try:
+ response = api.read_namespaced_pod_status(dep_pod_info['name'],
+ namespace)
+ status = response.status.phase
+ except ApiException as err:
+ raise Exception from err
+ if (status == "Running"
+ or status == "Failed"
+ or status == "Unknown"):
+ break
+ else:
+ time.sleep(5)
+ count = count + 1
+ # Now Get the Pod-IP
+ try:
+ response = api.read_namespaced_pod_status(dep_pod_info['name'],
+ namespace)
+ dep_pod_info['pod_ip'] = response.status.pod_ip
+ except ApiException as err:
+ raise Exception from err
+ dep_pod_info['namespace'] = namespace
+ dep_pod_list.append(dep_pod_info)
+ cmd = ['cat', '/etc/podnetinfo/annotations']
+ execute_command(api, dep_pod_info, cmd)
+
+ S.setValue('POD_LIST',dep_pod_list)
+ return dep_pod_list
def terminate(self):
"""
diff --git a/testcases/testcase.py b/testcases/testcase.py
index c13754bc..7f4ad9ac 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -207,7 +207,7 @@ class TestCase(object):
self._k8s = S.getValue('K8S')
if self._k8s:
if S.getValue('EXT_VSWITCH'):
- self._evfctl = extvswitchfctl.ExtVswitchFlowCtl()
+ self._evfctl = extvswitchflctl.ExtVswitchFlowCtl()
def run_initialize(self):
@@ -398,7 +398,8 @@ class TestCase(object):
# dump vswitch flows before they are affected by VNF termination
if not self._vswitch_none:
- self._vswitch_ctl.dump_vswitch_connections()
+ if not S.getValue('CLEAN_OUTPUT'):
+ self._vswitch_ctl.dump_vswitch_connections()
# garbage collection for case that TestSteps modify existing deployment
self.step_stop_vnfs()
@@ -413,7 +414,11 @@ class TestCase(object):
self._testcase_start_time))
logging.info("Testcase execution time: %s", self._testcase_run_time)
# report test results
+ if S.getValue('CLEAN_OUTPUT'):
+ print('BEGIN OF THE RESULTS')
self.run_report()
+ if S.getValue('CLEAN_OUTPUT'):
+ print('END OF THE RESULTS')
def _append_results(self, results):
"""
diff --git a/tools/k8s/cluster-deployment/uscni/Dockerfile b/tools/k8s/cluster-deployment/uscni/Dockerfile
new file mode 100644
index 00000000..ddfa3674
--- /dev/null
+++ b/tools/k8s/cluster-deployment/uscni/Dockerfile
@@ -0,0 +1,25 @@
+# docker build --rm -t uscni
+FROM centos:7
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN yum install -y epel-release wget; yum clean all
+RUN wget --no-check-certificate https://golang.org/dl/go1.16.6.linux-amd64.tar.gz
+RUN tar -C /usr/local -xzf go1.16.6.linux-amd64.tar.gz
+ENV PATH /usr/local/go/bin:$PATH
+RUN yum install -y git make sudo; yum clean all
+ENV GOPATH /root/go
+RUN mkdir -p "/root/go" "$GOPATH/bin" "$GOPATH/src" "$GOPATH/src" && chmod -R 777 "$GOPATH"
+WORKDIR /root/go/src/
+ENV GO111MODULE off
+RUN go get github.com/intel/userspace-cni-network-plugin > /tmp/UserspaceDockerBuild.log 2>&1 || echo "Can ignore no GO files."
+WORKDIR /root/go/src/github.com/intel/userspace-cni-network-plugin
+ENV GO111MODULE on
+ENV GOROOT /usr/local/go
+ENV GOPATH /root/go
+RUN make clean && make install-dep && make install && make
+RUN cp userspace/userspace /usr/bin/userspace
+
+FROM alpine:latest
+COPY --from=0 /usr/bin/userspace /userspace
+
+ADD entrypoint.sh /
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/tools/k8s/cluster-deployment/uscni/entrypoint.sh b/tools/k8s/cluster-deployment/uscni/entrypoint.sh
new file mode 100755
index 00000000..0658f150
--- /dev/null
+++ b/tools/k8s/cluster-deployment/uscni/entrypoint.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Always exit on errors.
+set -e
+
+# Check if /opt/cni/bin directory exists
+if [ ! -d "/host/opt/cni/bin" ]
+then
+ echo "Directory /opt/cni/bin/ does not exists."
+ exit 1;
+fi
+
+# Copy cni-plugin on host machine
+cp -f /userspace /host/opt/cni/bin/
+
+# Sleep for 50 years.
+# sleep infinity is not available in alpine;
+sleep 2147483647
diff --git a/tools/k8s/cluster-deployment/uscni/userspace-cni-daemonset.yml b/tools/k8s/cluster-deployment/uscni/userspace-cni-daemonset.yml
new file mode 100644
index 00000000..d598fc6d
--- /dev/null
+++ b/tools/k8s/cluster-deployment/uscni/userspace-cni-daemonset.yml
@@ -0,0 +1,45 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: userspace-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: userspace-cni
+spec:
+ selector:
+ matchLabels:
+ app: userspace-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: userspace-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: userspace-cni-plugin
+ image: uscni:latest
+ imagePullPolicy: Never
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
diff --git a/tools/pkt_gen/trex/trex_client.py b/tools/pkt_gen/trex/trex_client.py
index 3d6836d8..680497ec 100644
--- a/tools/pkt_gen/trex/trex_client.py
+++ b/tools/pkt_gen/trex/trex_client.py
@@ -85,6 +85,11 @@ _SCAPY_FRAME = {
'{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
}
+def cast_integer(value):
+ """
+ force 0 value if NaN value from TRex to avoid error in JSON result parsing
+ """
+ return int(value) if not isnan(value) else 0
class Trex(ITrafficGenerator):
"""Trex Traffic generator wrapper."""
@@ -103,6 +108,7 @@ class Trex(ITrafficGenerator):
self._stlclient = None
self._verification_params = None
self._show_packet_data = False
+        self.trial_results = {}
def show_packet_info(self, packet_a, packet_b):
"""
@@ -130,7 +136,15 @@ class Trex(ITrafficGenerator):
else:
raise RuntimeError('T-Rex: Trex host not defined')
- ping = subprocess.Popen(cmd_ping, shell=True, stderr=subprocess.PIPE)
+ if settings.getValue('CLEAN_OUTPUT'):
+ ping = subprocess.Popen(cmd_ping,
+ shell=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.PIPE)
+ else:
+ ping = subprocess.Popen(cmd_ping,
+ shell=True,
+ stderr=subprocess.PIPE)
output, error = ping.communicate()
if ping.returncode:
@@ -146,9 +160,16 @@ class Trex(ITrafficGenerator):
self._trex_base_dir + "t-rex-64"
- find_trex = subprocess.Popen(cmd_find_trex,
- shell=True,
- stderr=subprocess.PIPE)
+ if settings.getValue('CLEAN_OUTPUT'):
+ find_trex = subprocess.Popen(cmd_find_trex,
+ shell=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.PIPE)
+ else:
+ find_trex = subprocess.Popen(cmd_find_trex,
+ shell=True,
+ stderr=subprocess.PIPE)
+
output, error = find_trex.communicate()
if find_trex.returncode:
@@ -159,8 +180,12 @@ class Trex(ITrafficGenerator):
% (self._trex_host_ip_addr, self._trex_base_dir))
try:
- self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
- verbose_level='info')
+ if settings.getValue('CLEAN_OUTPUT'):
+ self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
+ verbose_level='error')
+ else:
+ self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
+ verbose_level='info')
self._stlclient.connect()
except STLError:
raise RuntimeError('T-Rex: Cannot connect to T-Rex server. Please check if it is '
@@ -386,7 +411,8 @@ class Trex(ITrafficGenerator):
self._stlclient.set_port_attr(my_ports, promiscuous=True)
packet_1, packet_2 = self.create_packets(traffic, ports_info)
- self.show_packet_info(packet_1, packet_2)
+ if not settings.getValue('CLEAN_OUTPUT'):
+ self.show_packet_info(packet_1, packet_2)
stream_1, stream_2, stream_1_lat, stream_2_lat = Trex.create_streams(packet_1, packet_2, traffic)
self._stlclient.add_streams(stream_1, ports=[0])
self._stlclient.add_streams(stream_2, ports=[1])
@@ -564,6 +590,48 @@ class Trex(ITrafficGenerator):
result[ResultsConstants.CAPTURE_TX] = stats['capture_tx']
if 'capture_rx' in stats:
result[ResultsConstants.CAPTURE_RX] = stats['capture_rx']
+ # Write all per-trial results to a file
+ filec = os.path.join(settings.getValue('RESULTS_PATH'), 'trex_pertrial_results.csv')
+
+ ports = [0, 1]
+ with open(filec, 'a') as fcp:
+ fcp.write("frame_size,port,rx_pkts,tx_pkts,rx_bytes,tx_bytes,"+
+ "rx_pps,tx_pps,rx_bps,tx_bps,"+
+ "drp_pkts_d1,drp_pkts_d2,min1,avg1,max1,"+
+ "min2,avg2,max2\n")
+ for key in self.trial_results:
+ for port in ports:
+ stats = self.trial_results[key][port]
+ far_end_stats = self.trial_results[key][1 - port]
+ tx_pkts = stats['opackets']
+ rx_pkts = stats['ipackets']
+ tx_bytes = stats['obytes']
+ rx_bytes = stats['ibytes']
+ rx_pps = stats['rx_pps']
+ tx_pps = stats['tx_pps']
+ rx_bps = stats['rx_bps']
+ tx_bps = stats['tx_bps']
+                dr_pkts_d1 = (cast_integer(far_end_stats['opackets'])
+                              - cast_integer(stats['ipackets']))
+                dr_pkts_d2 = (cast_integer(stats['opackets'])
+                              - cast_integer(far_end_stats['ipackets']))
+ try:
+ min1 = float(stats["latency"][0]["latency"]["total_min"])
+ min2 = float(stats["latency"][1]["latency"]["total_min"])
+ max1 = float(stats["latency"][0]["latency"]["total_max"])
+ max2 = float(stats["latency"][1]["latency"]["total_max"])
+ avg1 = float(stats["latency"][0]["latency"]["average"])
+ avg2 = float(stats["latency"][1]["latency"]["average"])
+ except TypeError:
+                    min1 = min2 = max1 = max2 = avg1 = avg2 = -1.0
+
+ fcp.write("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14},{15},{16},{17}\n".format(
+ key,port,rx_pkts,tx_pkts,rx_bytes,
+ tx_bytes,rx_pps,tx_pps,rx_bps,tx_bps,
+ dr_pkts_d1, dr_pkts_d2, min1, avg1, max1,
+ min2, avg2, max2))
+
+
return result
def learning_packets(self, traffic):
@@ -595,6 +663,7 @@ class Trex(ITrafficGenerator):
"""
threshold = settings.getValue('TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD')
max_repeat = settings.getValue('TRAFFICGEN_TREX_RFC2544_MAX_REPEAT')
+ frame_size = traffic['l2']['framesize']
loss_verification = settings.getValue('TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION')
if loss_verification:
self._logger.info("Running Binary Search with Loss Verification")
@@ -606,8 +675,10 @@ class Trex(ITrafficGenerator):
right = boundaries['right']
center = boundaries['center']
self._logger.info('Starting RFC2544 trials')
+ results = []
while (right - left) > threshold:
stats = self.generate_traffic(new_params, duration)
+ results.append(copy.deepcopy(stats))
test_lossrate = ((stats["total"]["opackets"] - stats[
"total"]["ipackets"]) * 100) / stats["total"]["opackets"]
if stats["total"]["ipackets"] == 0:
@@ -645,6 +716,7 @@ class Trex(ITrafficGenerator):
new_params = copy.deepcopy(traffic)
new_params['frame_rate'] = center
iteration += 1
+ self.trial_results[frame_size] = results
return stats_ok
def send_cont_traffic(self, traffic=None, duration=30):
diff --git a/vsperf b/vsperf
index 1fb52429..0bc1937c 100755
--- a/vsperf
+++ b/vsperf
@@ -747,6 +747,13 @@ def main():
# if required, handle list-* operations
handle_list_options(args)
+ # Using verbosity to run 'clean' test-runs.
+ if args['verbosity']:
+ settings.setValue('VERBOSITY', args['verbosity'])
+ settings.setValue('CLEAN_OUTPUT', True)
+ else:
+ settings.setValue('CLEAN_OUTPUT', False)
+
configure_logging(settings.getValue('VERBOSITY'))
# CI build support
diff --git a/vswitches/ovs.py b/vswitches/ovs.py
index 853bef85..a77e898e 100644
--- a/vswitches/ovs.py
+++ b/vswitches/ovs.py
@@ -26,9 +26,9 @@ import pexpect
from conf import settings
from src.ovs import OFBridge, flow_key, flow_match
-from vswitches.vswitch import IVSwitch
from tools import tasks
from tools.module_manager import ModuleManager
+from vswitches.vswitch import IVSwitch
# enable caching of flows if their number exceeds given limit
_CACHE_FLOWS_LIMIT = 10
@@ -100,6 +100,8 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
try:
tasks.Process.start(self)
+ if settings.getValue('CLEAN_OUTPUT'):
+ self._disable_console_output()
self.relinquish()
except (pexpect.EOF, pexpect.TIMEOUT) as exc:
self._logger.error("Exception during VSwitch start.")
@@ -469,6 +471,15 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
self._logger.info('System reset after last run.')
+ def _disable_console_output(self):
+ """
+ Configure vswitch to disable console output
+ """
+ ovsappctl_tool_bin = settings.getValue('TOOLS')['ovs-appctl']
+ tasks.run_task(['sudo', ovsappctl_tool_bin, 'vlog/set', ' console:off'],
+ self._logger,
+ 'Turning off the logs ...')
+
def _start_ovsdb(self):
"""Start ``ovsdb-server`` instance.
@@ -483,13 +494,22 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
ovsdb_server_bin = settings.getValue('TOOLS')['ovsdb-server']
- tasks.run_background_task(
- ['sudo', ovsdb_server_bin,
- '--remote=punix:%s' % os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'], 'db.sock'),
- '--remote=db:Open_vSwitch,Open_vSwitch,manager_options',
- '--pidfile=' + self._ovsdb_pidfile_path, '--overwrite-pidfile'],
- self._logger,
- 'Starting ovsdb-server...')
+ if settings.getValue('CLEAN_OUTPUT'):
+ tasks.run_background_task(
+ ['sudo', ovsdb_server_bin,
+ '--remote=punix:%s' % os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'], 'db.sock'),
+ '--remote=db:Open_vSwitch,Open_vSwitch,manager_options',
+ '--pidfile=' + self._ovsdb_pidfile_path, '--overwrite-pidfile', '--verbose=off'],
+ self._logger,
+ 'Starting ovsdb-server...')
+ else:
+ tasks.run_background_task(
+ ['sudo', ovsdb_server_bin,
+ '--remote=punix:%s' % os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'], 'db.sock'),
+ '--remote=db:Open_vSwitch,Open_vSwitch,manager_options',
+ '--pidfile=' + self._ovsdb_pidfile_path, '--overwrite-pidfile'],
+ self._logger,
+ 'Starting ovsdb-server...')
def _kill_ovsdb(self):
"""Kill ``ovsdb-server`` instance.