summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--INFO.yaml8
-rw-r--r--clover/collector/__init__.py0
-rwxr-xr-xclover/collector/build.sh16
-rw-r--r--clover/collector/db/__init__.py0
-rw-r--r--clover/collector/db/cassops.py144
-rw-r--r--clover/collector/db/redisops.py59
-rw-r--r--clover/collector/docker/Dockerfile30
-rw-r--r--clover/collector/grpc/__init__.py0
-rwxr-xr-xclover/collector/grpc/build_proto.sh11
-rw-r--r--clover/collector/grpc/collector.proto45
-rw-r--r--clover/collector/grpc/collector_client.py105
-rw-r--r--clover/collector/grpc/collector_pb2.py300
-rw-r--r--clover/collector/grpc/collector_pb2_grpc.py97
-rw-r--r--clover/collector/grpc/collector_server.py98
-rw-r--r--clover/collector/process/__init__.py0
-rw-r--r--clover/collector/process/collect.py162
-rwxr-xr-xclover/collector/process/grpc_process.sh11
-rw-r--r--clover/collector/yaml/manifest.template43
-rw-r--r--clover/collector/yaml/render_yaml.py73
-rw-r--r--clover/logging/install/elasticsearch-statefulset-service.yaml129
-rw-r--r--clover/logging/install/fluentd-daemonset-elasticsearch-rbac.yaml96
-rw-r--r--clover/logging/install/proxy-access-control-sidecar.yml32
-rw-r--r--clover/monitoring/monitoring.py5
-rw-r--r--clover/test/fraser_a_b_test.py2
-rw-r--r--docker/Dockerfile2
-rw-r--r--docs/development/design/logging.rst36
-rw-r--r--docs/release/configguide/sdc_config_guide.rst2
-rw-r--r--samples/services/nginx/docker/subservices/lb/Dockerfile2
-rw-r--r--samples/services/nginx/docker/subservices/proxy/Dockerfile2
-rw-r--r--samples/services/nginx/docker/subservices/server/Dockerfile2
-rw-r--r--samples/services/snort_ids/docker/Dockerfile2
-rwxr-xr-xxci-k8s-setup.sh40
32 files changed, 1534 insertions, 20 deletions
diff --git a/INFO.yaml b/INFO.yaml
index abe5ba5..12700e1 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -4,6 +4,10 @@ project_creation_date: ''
project_category: ''
lifecycle_state: 'Incubation'
project_lead: &opnfv_clover_ptl
+ name: 'Wenjing Chu'
+ email: 'chu.wenjing@gmail.com'
+ company: 'gmail.com'
+ id: 'wenjing'
primary_contact: *opnfv_clover_ptl
issue_tracking:
type: 'jira'
@@ -29,10 +33,6 @@ repositories:
- 'clover'
committers:
- <<: *opnfv_clover_ptl
- - name: 'Wenjing Chu'
- email: 'chu.wenjing@gmail.com'
- company: 'gmail.com'
- id: 'wenjing'
- name: 'Stephen Wong'
email: 'stephen.kf.wong@gmail.com'
company: 'gmail.com'
diff --git a/clover/collector/__init__.py b/clover/collector/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clover/collector/__init__.py
diff --git a/clover/collector/build.sh b/clover/collector/build.sh
new file mode 100755
index 0000000..f305a02
--- /dev/null
+++ b/clover/collector/build.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+IMAGE_PATH=${IMAGE_PATH:-"localhost:5000"}
+IMAGE_NAME=${IMAGE_NAME:-"clover-collector"}
+
+docker build -f docker/Dockerfile -t $IMAGE_NAME .
+docker tag $IMAGE_NAME $IMAGE_PATH/$IMAGE_NAME
+docker push $IMAGE_PATH/$IMAGE_NAME
diff --git a/clover/collector/db/__init__.py b/clover/collector/db/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clover/collector/db/__init__.py
diff --git a/clover/collector/db/cassops.py b/clover/collector/db/cassops.py
new file mode 100644
index 0000000..6553cff
--- /dev/null
+++ b/clover/collector/db/cassops.py
@@ -0,0 +1,144 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+from cassandra.cluster import Cluster
+from cassandra.query import BatchStatement
+import logging
+
+CASSANDRA_HOSTS = ['cassandra.default']
+
+
+class CassandraOps:
+
+ def __init__(self, hosts, port=9042, keyspace='visibility'):
+ logging.basicConfig(filename='cassops.log',
+ level=logging.DEBUG)
+ cluster = Cluster(hosts, port=port)
+ self.session = cluster.connect()
+ self.keyspace = keyspace
+
+ def truncate(self, tables=['traces', 'metrics', 'spans']):
+ self.session.set_keyspace(self.keyspace)
+ try:
+ for table in tables:
+ self.session.execute("""
+ TRUNCATE %s
+ """ % table)
+ except Exception as e:
+ logging.debug(e)
+
+ def init_visibility(self):
+ try:
+ self.session.execute("""
+ CREATE KEYSPACE %s
+ WITH replication = { 'class': 'SimpleStrategy',
+ 'replication_factor': '1' }
+ """ % self.keyspace)
+ except Exception as e:
+ logging.debug(e)
+
+ self.session.set_keyspace(self.keyspace)
+
+ try:
+ self.session.execute("""
+ CREATE TABLE IF NOT EXISTS traces (
+ traceid text,
+ processes list<text>,
+ PRIMARY KEY (traceid)
+ )
+ """)
+
+ self.session.execute("""
+ CREATE TABLE IF NOT EXISTS spans (
+ spanid text,
+ traceid text,
+ duration int,
+ start_time int,
+ processid text,
+ operation_name text,
+ node_id text,
+ http_url text,
+ upstream_cluster text,
+ PRIMARY KEY (spanid, traceid)
+ )
+ """)
+
+ self.session.execute("""
+ CREATE TABLE IF NOT EXISTS metrics (
+ m_name text,
+ m_value text,
+ m_time text,
+ service text,
+ monitor_time timestamp,
+ PRIMARY KEY (m_name, monitor_time)
+ )
+ """)
+ except Exception as e:
+ logging.debug(e)
+
+ def set_prepared(self):
+ self.session.set_keyspace(self.keyspace)
+ self.insert_tracing_stmt = self.session.prepare(
+ """
+ INSERT INTO spans (spanid, traceid, duration, operation_name,
+ node_id, http_url, upstream_cluster)
+ VALUES (?, ?, ?, ?, ?, ?, ?)
+ """
+ )
+ self.insert_metric_stmt = self.session.prepare(
+ """
+ INSERT INTO metrics
+ (m_name, m_value, m_time, service, monitor_time)
+ VALUES (?, ?, ?, ?, toTimestamp(now()))
+ """
+ )
+
+ def set_batch(self):
+ self.batch = BatchStatement()
+
+ def execute_batch(self):
+ self.session.execute(self.batch)
+
+ def insert_tracing(self, table, traceid, s, tags):
+ self.session.set_keyspace(self.keyspace)
+ if 'upstream_cluster' not in tags:
+ logging.debug('NO UPSTREAM_CLUSTER KEY')
+ tags['upstream_cluster'] = 'none'
+ try:
+ self.batch.add(self.insert_tracing_stmt,
+ (s['spanID'], traceid, s['duration'],
+ s['operationName'], tags['node_id'],
+ tags['http.url'], tags['upstream_cluster']))
+ except Exception as e:
+ logging.debug('{} {} {} {} {} {} {}'.format(s['spanID'], traceid,
+ s['duration'], s['operationName'], tags['node_id'],
+ tags['http.url'], tags['upstream_cluster']))
+ logging.debug(e)
+
+ def insert_trace(self, traceid, processes):
+ self.session.set_keyspace(self.keyspace)
+ self.session.execute(
+ """
+ INSERT INTO traces (traceid, processes)
+ VALUES (%s, %s)
+ """,
+ (traceid, processes)
+ )
+
+ def insert_metric(self, m_name, m_value, m_time, service):
+ self.session.set_keyspace(self.keyspace)
+ self.batch.add(self.insert_metric_stmt,
+ (m_name, m_value, m_time, service))
+
+
+def main():
+ cass = CassandraOps(CASSANDRA_HOSTS)
+ cass.init_visibility()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/clover/collector/db/redisops.py b/clover/collector/db/redisops.py
new file mode 100644
index 0000000..e80c417
--- /dev/null
+++ b/clover/collector/db/redisops.py
@@ -0,0 +1,59 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import redis
+import logging
+
+REDIS_HOST = 'redis'
+# REDIS_HOST = '10.244.0.85'
+
+
+class RedisOps:
+
+ def __init__(self, host=REDIS_HOST):
+ logging.basicConfig(filename='redisops.log',
+ level=logging.DEBUG)
+ try:
+ self.r = redis.StrictRedis(host=host, port=6379, db=0)
+ except Exception as e:
+ logging.debug(e)
+
+ def init_services(self, skey='visibility_services'):
+ service_names = ['http_lb', 'proxy_access_control']
+ for s in service_names:
+ self.r.sadd(skey, s)
+
+ def init_metrics(self, pkey='metric_prefixes', skey='metric_suffixes'):
+ metric_prefixes = ['envoy_cluster_out_', 'envoy_cluster_in_']
+ metric_suffixes = [
+ '_default_svc_cluster_local_http_internal_upstream_rq_2xx',
+ '_default_svc_cluster_local_http_upstream_cx_active']
+ for p in metric_prefixes:
+ self.r.sadd(pkey, p)
+ for s in metric_suffixes:
+ self.r.sadd(skey, s)
+
+ def get_services(self, skey='visibility_services'):
+ services = self.r.smembers(skey)
+ return services
+
+ def get_metrics(self, pkey='metric_prefixes', skey='metric_suffixes'):
+ prefixes = self.r.smembers(pkey)
+ suffixes = self.r.smembers(skey)
+ return prefixes, suffixes
+
+
+def main():
+ r = RedisOps()
+ r.init_services()
+ r.init_metrics()
+ r.get_services()
+ r.get_metrics()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/clover/collector/docker/Dockerfile b/clover/collector/docker/Dockerfile
new file mode 100644
index 0000000..1714420
--- /dev/null
+++ b/clover/collector/docker/Dockerfile
@@ -0,0 +1,30 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+FROM opnfv/clover
+
+ENV REPOS_DIR="/home/opnfv/repos"
+
+# Clover repo
+ENV CLOVER_REPO_DIR="${REPOS_DIR}/clover"
+
+# Install required python packages
+RUN python -m pip install cassandra-driver redis
+
+# Set work directory
+WORKDIR ${CLOVER_REPO_DIR}
+
+COPY /process clover/collector/process
+COPY /grpc clover/collector/grpc
+COPY /db clover/collector/db
+COPY __init__.py clover/collector/__init__.py
+
+RUN pip install .
+
+WORKDIR "${CLOVER_REPO_DIR}/clover/collector"
+
+CMD ./process/grpc_process.sh no_schema_init
diff --git a/clover/collector/grpc/__init__.py b/clover/collector/grpc/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clover/collector/grpc/__init__.py
diff --git a/clover/collector/grpc/build_proto.sh b/clover/collector/grpc/build_proto.sh
new file mode 100755
index 0000000..44467ad
--- /dev/null
+++ b/clover/collector/grpc/build_proto.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+python -m grpc_tools.protoc -I./ --python_out=. --grpc_python_out=. collector.proto
diff --git a/clover/collector/grpc/collector.proto b/clover/collector/grpc/collector.proto
new file mode 100644
index 0000000..fc8b636
--- /dev/null
+++ b/clover/collector/grpc/collector.proto
@@ -0,0 +1,45 @@
+// Copyright (c) Authors of Clover
+//
+// All rights reserved. This program and the accompanying materials
+// are made available under the terms of the Apache License, Version 2.0
+// which accompanies this distribution, and is available at
+// http://www.apache.org/licenses/LICENSE-2.0
+
+syntax = "proto3";
+
+package collector;
+
+// The controller service definition.
+service Controller {
+
+ rpc StopCollector (ConfigCollector) returns (CollectorReply) {}
+ rpc StartCollector (ConfigCollector) returns (CollectorReply) {}
+ rpc InitVisibility (ConfigCassandra) returns (CollectorReply) {}
+ rpc TruncateVisibility (Schemas) returns (CollectorReply) {}
+}
+
+message ConfigCassandra {
+ string cassandra_hosts = 1;
+ int32 cassandra_port = 2;
+}
+
+message ConfigCollector {
+ string t_port = 1;
+ string t_host = 2;
+ string m_port = 3;
+ string m_host = 4;
+ string c_port = 5;
+ string c_hosts = 6;
+ string sinterval = 7;
+}
+
+message Schemas {
+ string schemas = 1;
+ string cassandra_hosts = 2;
+ int32 cassandra_port = 3;
+
+}
+
+message CollectorReply {
+ string message = 1;
+}
diff --git a/clover/collector/grpc/collector_client.py b/clover/collector/grpc/collector_client.py
new file mode 100644
index 0000000..b9e9f67
--- /dev/null
+++ b/clover/collector/grpc/collector_client.py
@@ -0,0 +1,105 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+from __future__ import print_function
+from kubernetes import client, config
+
+import grpc
+import argparse
+import pickle
+
+import collector_pb2
+import collector_pb2_grpc
+
+# This is a basic client script to test server GRPC messaging
+# TODO improve interface overall
+
+
+def run(args, grpc_port='50054'):
+ pod_ip = get_podip('clover-collector')
+ if pod_ip == '':
+ return "Can not find service: {}".format(args['service_name'])
+ collector_grpc = pod_ip + ':' + grpc_port
+ channel = grpc.insecure_channel(collector_grpc)
+ stub = collector_pb2_grpc.ControllerStub(channel)
+
+ if args['cmd'] == 'init':
+ return init_visibility(stub)
+ elif args['cmd'] == 'start':
+ return start_collector(stub)
+ elif args['cmd'] == 'stop':
+ return stop_collector(stub)
+ elif args['cmd'] == 'clean':
+ return clean_visibility(stub)
+ else:
+ return "Invalid command: {}".format(args['cmd'])
+
+
+def get_podip(pod_name):
+ ip = ''
+ if pod_name != '':
+ config.load_kube_config()
+ v1 = client.CoreV1Api()
+ ret = v1.list_pod_for_all_namespaces(watch=False)
+ for i in ret.items:
+ if i.metadata.name.lower().find(pod_name.lower()) != -1:
+ print("Pod IP: {}".format(i.status.pod_ip))
+ ip = i.status.pod_ip
+ return str(ip)
+ return str(ip)
+
+
+def init_visibility(stub):
+ try:
+ cassandra_hosts = pickle.dumps(['cassandra.default'])
+ response = stub.InitVisibility(collector_pb2.ConfigCassandra(
+ cassandra_hosts=cassandra_hosts, cassandra_port=9042))
+ except Exception as e:
+ return e
+ return response.message
+
+
+def clean_visibility(stub):
+ try:
+ cassandra_hosts = pickle.dumps(['cassandra.default'])
+ schemas = pickle.dumps(['spans', 'traces', 'metrics'])
+ response = stub.TruncateVisibility(collector_pb2.Schemas(
+ schemas=schemas, cassandra_hosts=cassandra_hosts,
+ cassandra_port=9042))
+ except Exception as e:
+ return e
+ return response.message
+
+
+def start_collector(stub):
+ try:
+ cassandra_hosts = pickle.dumps(['cassandra.default'])
+ response = stub.StartCollector(collector_pb2.ConfigCollector(
+ t_port='16686', t_host='jaeger-deployment.istio-system',
+ m_port='9090', m_host='prometheus.istio-system',
+ c_port='9042', c_hosts=cassandra_hosts,
+ sinterval='5'))
+ except Exception as e:
+ return e
+ return response.message
+
+
+def stop_collector(stub):
+ try:
+ response = stub.StopCollector(collector_pb2.ConfigCollector())
+ except Exception as e:
+ return e
+ return response.message
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--cmd', required=True,
+ help='Command to execute in collector')
+ args = parser.parse_args()
+ print(run(vars(args)))
diff --git a/clover/collector/grpc/collector_pb2.py b/clover/collector/grpc/collector_pb2.py
new file mode 100644
index 0000000..f67c880
--- /dev/null
+++ b/clover/collector/grpc/collector_pb2.py
@@ -0,0 +1,300 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: collector.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='collector.proto',
+ package='collector',
+ syntax='proto3',
+ serialized_pb=_b('\n\x0f\x63ollector.proto\x12\tcollector\"B\n\x0f\x43onfigCassandra\x12\x17\n\x0f\x63\x61ssandra_hosts\x18\x01 \x01(\t\x12\x16\n\x0e\x63\x61ssandra_port\x18\x02 \x01(\x05\"\x85\x01\n\x0f\x43onfigCollector\x12\x0e\n\x06t_port\x18\x01 \x01(\t\x12\x0e\n\x06t_host\x18\x02 \x01(\t\x12\x0e\n\x06m_port\x18\x03 \x01(\t\x12\x0e\n\x06m_host\x18\x04 \x01(\t\x12\x0e\n\x06\x63_port\x18\x05 \x01(\t\x12\x0f\n\x07\x63_hosts\x18\x06 \x01(\t\x12\x11\n\tsinterval\x18\x07 \x01(\t\"K\n\x07Schemas\x12\x0f\n\x07schemas\x18\x01 \x01(\t\x12\x17\n\x0f\x63\x61ssandra_hosts\x18\x02 \x01(\t\x12\x16\n\x0e\x63\x61ssandra_port\x18\x03 \x01(\x05\"!\n\x0e\x43ollectorReply\x12\x0f\n\x07message\x18\x01 \x01(\t2\xb3\x02\n\nController\x12H\n\rStopCollector\x12\x1a.collector.ConfigCollector\x1a\x19.collector.CollectorReply\"\x00\x12I\n\x0eStartCollector\x12\x1a.collector.ConfigCollector\x1a\x19.collector.CollectorReply\"\x00\x12I\n\x0eInitVisibility\x12\x1a.collector.ConfigCassandra\x1a\x19.collector.CollectorReply\"\x00\x12\x45\n\x12TruncateVisibility\x12\x12.collector.Schemas\x1a\x19.collector.CollectorReply\"\x00\x62\x06proto3')
+)
+
+
+
+
+_CONFIGCASSANDRA = _descriptor.Descriptor(
+ name='ConfigCassandra',
+ full_name='collector.ConfigCassandra',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cassandra_hosts', full_name='collector.ConfigCassandra.cassandra_hosts', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cassandra_port', full_name='collector.ConfigCassandra.cassandra_port', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=30,
+ serialized_end=96,
+)
+
+
+_CONFIGCOLLECTOR = _descriptor.Descriptor(
+ name='ConfigCollector',
+ full_name='collector.ConfigCollector',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='t_port', full_name='collector.ConfigCollector.t_port', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='t_host', full_name='collector.ConfigCollector.t_host', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='m_port', full_name='collector.ConfigCollector.m_port', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='m_host', full_name='collector.ConfigCollector.m_host', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='c_port', full_name='collector.ConfigCollector.c_port', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='c_hosts', full_name='collector.ConfigCollector.c_hosts', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='sinterval', full_name='collector.ConfigCollector.sinterval', index=6,
+ number=7, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=99,
+ serialized_end=232,
+)
+
+
+_SCHEMAS = _descriptor.Descriptor(
+ name='Schemas',
+ full_name='collector.Schemas',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='schemas', full_name='collector.Schemas.schemas', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cassandra_hosts', full_name='collector.Schemas.cassandra_hosts', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cassandra_port', full_name='collector.Schemas.cassandra_port', index=2,
+ number=3, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=234,
+ serialized_end=309,
+)
+
+
+_COLLECTORREPLY = _descriptor.Descriptor(
+ name='CollectorReply',
+ full_name='collector.CollectorReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='collector.CollectorReply.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=311,
+ serialized_end=344,
+)
+
+DESCRIPTOR.message_types_by_name['ConfigCassandra'] = _CONFIGCASSANDRA
+DESCRIPTOR.message_types_by_name['ConfigCollector'] = _CONFIGCOLLECTOR
+DESCRIPTOR.message_types_by_name['Schemas'] = _SCHEMAS
+DESCRIPTOR.message_types_by_name['CollectorReply'] = _COLLECTORREPLY
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+ConfigCassandra = _reflection.GeneratedProtocolMessageType('ConfigCassandra', (_message.Message,), dict(
+ DESCRIPTOR = _CONFIGCASSANDRA,
+ __module__ = 'collector_pb2'
+ # @@protoc_insertion_point(class_scope:collector.ConfigCassandra)
+ ))
+_sym_db.RegisterMessage(ConfigCassandra)
+
+ConfigCollector = _reflection.GeneratedProtocolMessageType('ConfigCollector', (_message.Message,), dict(
+ DESCRIPTOR = _CONFIGCOLLECTOR,
+ __module__ = 'collector_pb2'
+ # @@protoc_insertion_point(class_scope:collector.ConfigCollector)
+ ))
+_sym_db.RegisterMessage(ConfigCollector)
+
+Schemas = _reflection.GeneratedProtocolMessageType('Schemas', (_message.Message,), dict(
+ DESCRIPTOR = _SCHEMAS,
+ __module__ = 'collector_pb2'
+ # @@protoc_insertion_point(class_scope:collector.Schemas)
+ ))
+_sym_db.RegisterMessage(Schemas)
+
+CollectorReply = _reflection.GeneratedProtocolMessageType('CollectorReply', (_message.Message,), dict(
+ DESCRIPTOR = _COLLECTORREPLY,
+ __module__ = 'collector_pb2'
+ # @@protoc_insertion_point(class_scope:collector.CollectorReply)
+ ))
+_sym_db.RegisterMessage(CollectorReply)
+
+
+
+_CONTROLLER = _descriptor.ServiceDescriptor(
+ name='Controller',
+ full_name='collector.Controller',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=347,
+ serialized_end=654,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='StopCollector',
+ full_name='collector.Controller.StopCollector',
+ index=0,
+ containing_service=None,
+ input_type=_CONFIGCOLLECTOR,
+ output_type=_COLLECTORREPLY,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='StartCollector',
+ full_name='collector.Controller.StartCollector',
+ index=1,
+ containing_service=None,
+ input_type=_CONFIGCOLLECTOR,
+ output_type=_COLLECTORREPLY,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='InitVisibility',
+ full_name='collector.Controller.InitVisibility',
+ index=2,
+ containing_service=None,
+ input_type=_CONFIGCASSANDRA,
+ output_type=_COLLECTORREPLY,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='TruncateVisibility',
+ full_name='collector.Controller.TruncateVisibility',
+ index=3,
+ containing_service=None,
+ input_type=_SCHEMAS,
+ output_type=_COLLECTORREPLY,
+ options=None,
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_CONTROLLER)
+
+DESCRIPTOR.services_by_name['Controller'] = _CONTROLLER
+
+# @@protoc_insertion_point(module_scope)
diff --git a/clover/collector/grpc/collector_pb2_grpc.py b/clover/collector/grpc/collector_pb2_grpc.py
new file mode 100644
index 0000000..a7be73c
--- /dev/null
+++ b/clover/collector/grpc/collector_pb2_grpc.py
@@ -0,0 +1,97 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+import collector_pb2 as collector__pb2
+
+
+class ControllerStub(object):
+ """The controller service definition.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.StopCollector = channel.unary_unary(
+ '/collector.Controller/StopCollector',
+ request_serializer=collector__pb2.ConfigCollector.SerializeToString,
+ response_deserializer=collector__pb2.CollectorReply.FromString,
+ )
+ self.StartCollector = channel.unary_unary(
+ '/collector.Controller/StartCollector',
+ request_serializer=collector__pb2.ConfigCollector.SerializeToString,
+ response_deserializer=collector__pb2.CollectorReply.FromString,
+ )
+ self.InitVisibility = channel.unary_unary(
+ '/collector.Controller/InitVisibility',
+ request_serializer=collector__pb2.ConfigCassandra.SerializeToString,
+ response_deserializer=collector__pb2.CollectorReply.FromString,
+ )
+ self.TruncateVisibility = channel.unary_unary(
+ '/collector.Controller/TruncateVisibility',
+ request_serializer=collector__pb2.Schemas.SerializeToString,
+ response_deserializer=collector__pb2.CollectorReply.FromString,
+ )
+
+
+class ControllerServicer(object):
+ """The controller service definition.
+ """
+
+ def StopCollector(self, request, context):
+ # missing associated documentation comment in .proto file
+ pass
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def StartCollector(self, request, context):
+ # missing associated documentation comment in .proto file
+ pass
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def InitVisibility(self, request, context):
+ # missing associated documentation comment in .proto file
+ pass
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def TruncateVisibility(self, request, context):
+ # missing associated documentation comment in .proto file
+ pass
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ControllerServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'StopCollector': grpc.unary_unary_rpc_method_handler(
+ servicer.StopCollector,
+ request_deserializer=collector__pb2.ConfigCollector.FromString,
+ response_serializer=collector__pb2.CollectorReply.SerializeToString,
+ ),
+ 'StartCollector': grpc.unary_unary_rpc_method_handler(
+ servicer.StartCollector,
+ request_deserializer=collector__pb2.ConfigCollector.FromString,
+ response_serializer=collector__pb2.CollectorReply.SerializeToString,
+ ),
+ 'InitVisibility': grpc.unary_unary_rpc_method_handler(
+ servicer.InitVisibility,
+ request_deserializer=collector__pb2.ConfigCassandra.FromString,
+ response_serializer=collector__pb2.CollectorReply.SerializeToString,
+ ),
+ 'TruncateVisibility': grpc.unary_unary_rpc_method_handler(
+ servicer.TruncateVisibility,
+ request_deserializer=collector__pb2.Schemas.FromString,
+ response_serializer=collector__pb2.CollectorReply.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'collector.Controller', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
diff --git a/clover/collector/grpc/collector_server.py b/clover/collector/grpc/collector_server.py
new file mode 100644
index 0000000..c2eb221
--- /dev/null
+++ b/clover/collector/grpc/collector_server.py
@@ -0,0 +1,98 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+
+from concurrent import futures
+from clover.collector.db.cassops import CassandraOps
+import time
+import sys
+import grpc
+import subprocess
+import pickle
+import logging
+import collector_pb2
+import collector_pb2_grpc
+
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+GRPC_PORT = '[::]:50054'
+
+
+class Controller(collector_pb2_grpc.ControllerServicer):
+
+ def __init__(self, init_visibility):
+ logging.basicConfig(filename='collector_server.log',
+ level=logging.DEBUG)
+ self.collector = 0
+ if init_visibility == 'set_schemas':
+ cassandra_hosts = pickle.dumps(['cassandra.default'])
+ self.InitVisibility(collector_pb2.ConfigCassandra(
+ cassandra_port=9042, cassandra_hosts=cassandra_hosts), "")
+
+ def StopCollector(self, r, context):
+ try:
+ subprocess.Popen.kill(self.collector)
+ msg = "Stopped collector on pid: {}".format(self.collector.pid)
+ except Exception as e:
+ logging.debug(e)
+ msg = "Failed to stop collector"
+ return collector_pb2.CollectorReply(message=msg)
+
+ def StartCollector(self, r, context):
+ try:
+ self.collector = subprocess.Popen(
+ ["python", "process/collect.py",
+ "-sinterval={}".format(r.sinterval),
+ "-c_port={}".format(r.c_port),
+ "-t_port={}".format(r.t_port), "-t_host={}".format(r.t_host),
+ "-m_port={}".format(r.m_port), "-m_host={}".format(r.m_host),
+ "-c_hosts={}".format(pickle.loads(r.c_hosts)), "&"],
+ shell=False)
+ msg = "Started collector on pid: {}".format(self.collector.pid)
+ except Exception as e:
+ logging.debug(e)
+ msg = e
+ return collector_pb2.CollectorReply(message=msg)
+
+ def InitVisibility(self, r, context):
+ try:
+ cass = CassandraOps(pickle.loads(r.cassandra_hosts),
+ r.cassandra_port)
+ cass.init_visibility()
+ msg = "Added visibility schemas in cassandra"
+ except Exception as e:
+ logging.debug(e)
+ msg = "Failed to initialize cassandra"
+ return collector_pb2.CollectorReply(message=msg)
+
+ def TruncateVisibility(self, r, context):
+ try:
+ cass = CassandraOps(pickle.loads(r.cassandra_hosts),
+ r.cassandra_port)
+ cass.truncate(pickle.loads(r.schemas))
+ msg = "Truncated visibility tables"
+ except Exception as e:
+ logging.debug(e)
+ msg = "Failed to truncate visibility"
+ return collector_pb2.CollectorReply(message=msg)
+
+
+def serve(init_visibility):
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ collector_pb2_grpc.add_ControllerServicer_to_server(
+ Controller(init_visibility), server)
+ server.add_insecure_port(GRPC_PORT)
+ server.start()
+ try:
+ while True:
+ time.sleep(_ONE_DAY_IN_SECONDS)
+ except KeyboardInterrupt:
+ server.stop(0)
+
+
+if __name__ == '__main__':
+ serve(sys.argv[1])
diff --git a/clover/collector/process/__init__.py b/clover/collector/process/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/clover/collector/process/__init__.py
diff --git a/clover/collector/process/collect.py b/clover/collector/process/collect.py
new file mode 100644
index 0000000..d8beb49
--- /dev/null
+++ b/clover/collector/process/collect.py
@@ -0,0 +1,162 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+from clover.tracing.tracing import Tracing
+from clover.monitoring.monitoring import Monitoring
+from clover.collector.db.cassops import CassandraOps
+from clover.collector.db.redisops import RedisOps
+
+# import pprint
+import time
+import argparse
+import logging
+import ast
+
+TRACING_SERVICES = ['istio-ingress']
+TRACING_PORT = "16686"
+MONITORING_PORT = "9090"
+CASSANDRA_PORT = 9042 # Provide as integer
+MONITORING_HOST = "prometheus.istio-system"
+TRACING_HOST = "jaeger-deployment.istio-system"
+CASSANDRA_HOSTS = ['cassandra.default']
+
+
class Collector:
    """Pulls traces from Jaeger and metrics from Prometheus and
    batch-inserts them into the Cassandra visibility tables.

    The service and metric name lists for the monitoring pass are read
    from Redis on every iteration, so they can be changed at runtime.
    """

    def __init__(self, t_port, t_host, m_port, m_host, c_port, c_hosts):
        # Runs headless as a background process; log to a local file.
        logging.basicConfig(filename='collector.log', level=logging.DEBUG)
        try:
            self.t = Tracing(t_host, t_port, '', False)
            monitoring_url = "http://{}:{}".format(m_host, m_port)
            self.m = Monitoring(monitoring_url)
            self.c = CassandraOps(c_hosts, int(c_port))
            self.c.set_prepared()
            self.r = RedisOps()
        except Exception as e:
            # Startup failures are only logged; any missing attribute will
            # surface later when the collect loop uses it.
            logging.debug(e)

    # Toplevel tracing retrieval and batch insert
    def get_tracing(self, services, time_back=20):
        """Fetch recent traces for each service and write them to
        Cassandra as one batch.

        :param services: iterable of service names to query in Jaeger.
        :param time_back: window passed to Tracing.getTraces (presumably
            how far back to look -- units defined there; TODO confirm).
        """
        self.c.set_batch()
        for service in services:
            traces = self.t.getTraces(service, time_back)
            try:
                self.set_tracing(traces)
            except Exception as e:
                # Skip malformed trace payloads but keep batching the rest.
                logging.debug(e)
        self.c.execute_batch()

    # Insert to cassandra visibility traces and spans tables
    def set_tracing(self, trace):
        """Unpack a Jaeger trace response and stage spans plus per-trace
        service names for insertion."""
        for traces in trace['data']:
            for spans in traces['spans']:
                # Copy only the span fields the visibility schema stores.
                span = {}
                span['spanID'] = spans['spanID']
                span['duration'] = spans['duration']
                span['startTime'] = spans['startTime']
                span['operationName'] = spans['operationName']
                # Flatten the span's tag list into a key -> value dict.
                tag = {}
                for tags in spans['tags']:
                    tag[tags['key']] = tags['value']
                self.c.insert_tracing('spans', traces['traceID'],
                                      span, tag)
            process_list = []
            for p in traces['processes']:
                process_list.append(p)
            # Record which services participated in this trace.
            service_names = []
            for pname in process_list:
                service_names.append(traces['processes'][pname]['serviceName'])
            self.c.insert_trace(traces['traceID'], service_names)

    # Insert to cassandra visibility metrics table
    def get_monitoring(self):
        """Query Prometheus for every service/metric-name combination
        listed in Redis and batch-insert the instant values."""

        # Fetch collector service/metric lists from redis
        service_names = self.r.get_services()
        metric_prefixes, metric_suffixes = self.r.get_metrics()

        self.c.set_batch()
        for sname in service_names:
            for prefix in metric_prefixes:
                for suffix in metric_suffixes:
                    try:
                        # Metric names are composed as prefix+service+suffix.
                        metric_name = prefix + sname + suffix
                        query_params = {
                            "type": "instant",
                            "query": metric_name
                        }
                        data = self.m.query(query_params)
                        # Prometheus instant vector: 'value' is a
                        # [timestamp, value] pair.
                        m_value = data['data']['result'][0]['value'][1]
                        m_time = data['data']['result'][0]['value'][0]
                        mn = data['data']['result'][0]['metric']['__name__']
                        self.c.insert_metric(mn, m_value, str(m_time), sname)
                    except Exception as e:
                        # Metrics that do not exist return an empty result
                        # list and land here; log and continue.
                        logging.debug(e)
        self.c.execute_batch()

    # TODO add batch retrieval for monitoring metrics
    # query_range_param = {
    #     "type": "range",
    #     "query": "tbd",
    #     "start": "60m",
    #     "end": "5m",
    #     "step": "30s"
    # }
    # data = self.m.query(query_range_param)
    # pp = pprint.PrettyPrinter(indent=2)
    # pp.pprint(data)
+
+
def main(args):
    """Run the collector loop until interrupted.

    ``c_hosts`` may arrive as a real list (library use / argparse default)
    or as a string repr of a list (CLI use); normalize to a list before
    constructing the Collector. Returns None when stopped with Ctrl-C.
    """
    try:
        # 'basestring' only exists on Python 2; fall back to 'str' so this
        # module no longer crashes with NameError on Python 3.
        string_types = basestring  # noqa: F821
    except NameError:
        string_types = str
    if isinstance(args['c_hosts'], string_types):
        ch = ast.literal_eval(args['c_hosts'])
    else:
        ch = args['c_hosts']

    c = Collector(args['t_port'], args['t_host'], args['m_port'],
                  args['m_host'], args['c_port'], ch)

    # Collector loop
    loop = True
    while loop:
        try:
            c.get_tracing(args['t_services'])
            c.get_monitoring()
            time.sleep(int(args['sinterval']))
        except KeyboardInterrupt:
            loop = False
+
+
if __name__ == '__main__':
    # Flags mirror the module-level defaults above. Unknown flags are
    # tolerated (parse_known_args) so callers may pass extras safely.
    arg_table = [
        ('-sinterval', 5,
         'Sample interval for collector loop'),
        ('-t_port', TRACING_PORT,
         'Port to access Jaeger tracing'),
        ('-m_port', MONITORING_PORT,
         'Port to access Prometheus monitoring'),
        ('-t_host', TRACING_HOST,
         'Host to access Jaeger tracing'),
        ('-m_host', MONITORING_HOST,
         'Host to access Prometheus monitoring'),
        ('-c_hosts', CASSANDRA_HOSTS,
         'Host(s) to access Cassandra cluster'),
        ('-c_port', CASSANDRA_PORT,
         'Port to access Cassandra cluster'),
        ('-t_services', TRACING_SERVICES,
         'Collect services on this list of services'),
    ]
    parser = argparse.ArgumentParser()
    for flag, default, helptext in arg_table:
        parser.add_argument(flag, default=default, help=helptext)

    args, unknown = parser.parse_known_args()
    print(main(vars(args)))
diff --git a/clover/collector/process/grpc_process.sh b/clover/collector/process/grpc_process.sh
new file mode 100755
index 0000000..30e0171
--- /dev/null
+++ b/clover/collector/process/grpc_process.sh
@@ -0,0 +1,11 @@
#!/bin/bash
#
# Copyright (c) Authors of Clover
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#

# Launch the collector GRPC management server. The positional argument is
# forwarded to serve() as init_visibility -- its exact semantics live in
# collector_server.Controller (TODO confirm what value 'test1' triggers).
python grpc/collector_server.py test1
diff --git a/clover/collector/yaml/manifest.template b/clover/collector/yaml/manifest.template
new file mode 100644
index 0000000..c7aa3e7
--- /dev/null
+++ b/clover/collector/yaml/manifest.template
@@ -0,0 +1,43 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: {{ deploy_name }}
+ labels:
+ app: {{ deploy_name }}
+spec:
+ template:
+ metadata:
+ labels:
+ app: {{ deploy_name }}
+ spec:
+ containers:
+ - name: {{ deploy_name }}
+ image: {{ image_path }}/{{ image_name }}:{{ image_tag }}
+ ports:
+ - containerPort: {{ grpc_port }}
+ - containerPort: {{ redis_port }}
+ - containerPort: {{ monitor_port }}
+ - containerPort: {{ trace_port }}
+ - containerPort: {{ cass_port }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ deploy_name }}
+ labels:
+ app: {{ deploy_name }}
+spec:
+ ports:
+ - port: {{ grpc_port }}
+ name: grpc
+ - port: {{ redis_port }}
+ name: redis
+ - port: {{ trace_port }}
+ name: jaeger-deployment
+ - port: {{ monitor_port }}
+ name: prometheus
+ - port: {{ cass_port }}
+ name: cassandra
+ selector:
+ app: {{ deploy_name }}
diff --git a/clover/collector/yaml/render_yaml.py b/clover/collector/yaml/render_yaml.py
new file mode 100644
index 0000000..c1d8be7
--- /dev/null
+++ b/clover/collector/yaml/render_yaml.py
@@ -0,0 +1,73 @@
+# Copyright (c) Authors of Clover
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import argparse
+
+from jinja2 import Template
+
+
def render_yaml(args):
    """Render manifest.template into '<deploy_name>.yaml'.

    :param args: dict of template variables (image_*, *_port, deploy_name).
    :returns: a human-readable status string; errors are printed and
        reported in the return value rather than raised (CLI behavior).
    """
    # NOTE(review): the template is resolved relative to the current
    # working directory, so this must be run from clover/collector/yaml.
    template_file = 'manifest.template'
    out_file = args['deploy_name'] + '.yaml'

    try:
        with open(template_file) as f:
            tmpl = Template(f.read())
        output = tmpl.render(
            image_path=args['image_path'],
            image_name=args['image_name'],
            image_tag=args['image_tag'],
            deploy_name=args['deploy_name'],
            grpc_port=args['grpc_port'],
            monitor_port=args['monitor_port'],
            redis_port=args['redis_port'],
            cass_port=args['cass_port'],
            trace_port=args['trace_port']
        )
        # Write in text mode: jinja2's render() returns text, which a
        # binary ('wb') handle rejects under Python 3.
        with open(out_file, "w") as fh:
            fh.write(output)
        return "Generated manifest for {}".format(args['deploy_name'])
    except Exception as e:
        print(e)
        return "Unable to generate manifest for {}".format(
            args['deploy_name'])
+
+
if __name__ == '__main__':
    # Defaults target a local registry with the 'latest' tag; the
    # published OPNFV coordinates would be image_path='opnfv' with
    # image_tag='opnfv-6.0.0'.
    option_table = [
        ('--image_name', 'clover-collector',
         'The image name to use'),
        ('--image_path', 'localhost:5000',
         'The path to the image to use'),
        ('--image_tag', 'latest',
         'The image tag to use'),
        ('--deploy_name', 'clover-collector',
         'The k8s deploy name to use'),
        ('--redis_port', '6379',
         'The redis port to connect for management'),
        ('--monitor_port', '9090',
         'The Prometheus monitoring port'),
        ('--grpc_port', '50054',
         'The GRPC server port for collector management'),
        ('--trace_port', '16686',
         'The Jaeger tracing port'),
        ('--cass_port', '9042',
         'The Cassandra port'),
    ]
    parser = argparse.ArgumentParser()
    for flag, default, helptext in option_table:
        parser.add_argument(flag, default=default, help=helptext)

    args = parser.parse_args()
    print(render_yaml(vars(args)))
diff --git a/clover/logging/install/elasticsearch-statefulset-service.yaml b/clover/logging/install/elasticsearch-statefulset-service.yaml
new file mode 100644
index 0000000..0fcc832
--- /dev/null
+++ b/clover/logging/install/elasticsearch-statefulset-service.yaml
@@ -0,0 +1,129 @@
+# RBAC authn and authz
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: elasticsearch-logging
+ namespace: kube-system
+ labels:
+ k8s-app: elasticsearch-logging
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: elasticsearch-logging
+ labels:
+ k8s-app: elasticsearch-logging
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - "services"
+ - "namespaces"
+ - "endpoints"
+ verbs:
+ - "get"
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ namespace: kube-system
+ name: elasticsearch-logging
+ labels:
+ k8s-app: elasticsearch-logging
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+- kind: ServiceAccount
+ name: elasticsearch-logging
+ namespace: kube-system
+ apiGroup: ""
+roleRef:
+ kind: ClusterRole
+ name: elasticsearch-logging
+ apiGroup: ""
+---
+# Elasticsearch deployment itself
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-logging
+ namespace: kube-system
+ labels:
+ k8s-app: elasticsearch-logging
+ version: v5.6.4
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ serviceName: elasticsearch-logging
+ replicas: 2
+ selector:
+ matchLabels:
+ k8s-app: elasticsearch-logging
+ version: v5.6.4
+ template:
+ metadata:
+ labels:
+ k8s-app: elasticsearch-logging
+ version: v5.6.4
+ kubernetes.io/cluster-service: "true"
+ spec:
+ serviceAccountName: elasticsearch-logging
+ containers:
+ - image: k8s.gcr.io/elasticsearch:v5.6.4
+ name: elasticsearch-logging
+ resources:
+ # need more cpu upon initialization, therefore burstable class
+ limits:
+ cpu: 1000m
+ requests:
+ cpu: 100m
+ ports:
+ - containerPort: 9200
+ name: db
+ protocol: TCP
+ - containerPort: 9300
+ name: transport
+ protocol: TCP
+ volumeMounts:
+ - name: elasticsearch-logging
+ mountPath: /data
+ env:
+ - name: "NAMESPACE"
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumes:
+ - name: elasticsearch-logging
+ emptyDir: {}
+ # Elasticsearch requires vm.max_map_count to be at least 262144.
+ # If your OS already sets up this number to a higher value, feel free
+ # to remove this init container.
+ initContainers:
+ - image: alpine:3.6
+ command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
+ name: elasticsearch-logging-init
+ securityContext:
+ privileged: true
+---
+# Elasticsearch Service
+apiVersion: v1
+kind: Service
+metadata:
+ name: elasticsearch-logging
+ namespace: kube-system
+ labels:
+ k8s-app: elasticsearch-logging
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "Elasticsearch"
+spec:
+ ports:
+ - port: 9200
+ protocol: TCP
+ targetPort: db
+ selector:
+ k8s-app: elasticsearch-logging
diff --git a/clover/logging/install/fluentd-daemonset-elasticsearch-rbac.yaml b/clover/logging/install/fluentd-daemonset-elasticsearch-rbac.yaml
new file mode 100644
index 0000000..8131ef5
--- /dev/null
+++ b/clover/logging/install/fluentd-daemonset-elasticsearch-rbac.yaml
@@ -0,0 +1,96 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: fluentd
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: fluentd
+ namespace: kube-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: fluentd
+roleRef:
+ kind: ClusterRole
+ name: fluentd
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+ name: fluentd
+ namespace: kube-system
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: fluentd
+ namespace: kube-system
+ labels:
+ k8s-app: fluentd-logging
+ version: v1
+ kubernetes.io/cluster-service: "true"
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: fluentd-logging
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ spec:
+ serviceAccount: fluentd
+ serviceAccountName: fluentd
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: fluentd
+ image: fluent/fluentd-kubernetes-daemonset:elasticsearch
+ env:
+ - name: FLUENT_ELASTICSEARCH_HOST
+ value: "elasticsearch-logging"
+ - name: FLUENT_ELASTICSEARCH_PORT
+ value: "9200"
+ - name: FLUENT_ELASTICSEARCH_SCHEME
+ value: "http"
+ # X-Pack Authentication
+ # =====================
+ - name: FLUENT_ELASTICSEARCH_USER
+ value: "elastic"
+ - name: FLUENT_ELASTICSEARCH_PASSWORD
+ value: "changeme"
+ resources:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ volumeMounts:
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
diff --git a/clover/logging/install/proxy-access-control-sidecar.yml b/clover/logging/install/proxy-access-control-sidecar.yml
new file mode 100644
index 0000000..833f9f7
--- /dev/null
+++ b/clover/logging/install/proxy-access-control-sidecar.yml
@@ -0,0 +1,32 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: proxy-access-control
+ labels:
+ app: proxy-access-control
+spec:
+ template:
+ metadata:
+ labels:
+ app: proxy-access-control
+ spec:
+ containers:
+ - name: proxy-access-control
+ image: opnfv/clover-ns-nginx-proxy:latest
+ ports:
+ - containerPort: 50054
+ - containerPort: 9180
+# inject nginx access log streaming
+ volumeMounts:
+ - name: nginxlog
+ mountPath: /var/log/nginx
+ - name: nginx-access-log
+ image: busybox
+ args: [/bin/sh, -c, 'tail -n+1 -f /var/log/nginx/access.log']
+ volumeMounts:
+ - name: nginxlog
+ mountPath: /var/log/nginx
+ volumes:
+ - name: nginxlog
+ emptyDir: {}
diff --git a/clover/monitoring/monitoring.py b/clover/monitoring/monitoring.py
index 9726fd1..ec97e82 100644
--- a/clover/monitoring/monitoring.py
+++ b/clover/monitoring/monitoring.py
@@ -90,8 +90,9 @@ class Monitoring(object):
print("query %s %s, status=%s, size=%d, dur=%.3f" % \
(self.host, query_params["query"], resp.status_code, len(resp.text), dur))
- pp = pprint.PrettyPrinter(indent=2)
- pp.pprint(resp.json())
+ #pp = pprint.PrettyPrinter(indent=2)
+ ##pp.pprint(resp.json())
+ return resp.json()
except Exception as e:
print("ERROR: Could not query prometheus instance %s. \n %s" % (url, e))
diff --git a/clover/test/fraser_a_b_test.py b/clover/test/fraser_a_b_test.py
index cfbc79f..2270e04 100644
--- a/clover/test/fraser_a_b_test.py
+++ b/clover/test/fraser_a_b_test.py
@@ -218,6 +218,8 @@ def main(argv):
test_id = uuid.uuid4()
rr.set_route_rules(test_id)
tracing.setTest(test_id)
+ # wait 1 sec to avoid missing the first test result
+ time.sleep(1)
try:
output = subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError, e:
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 2cd6340..5cc7323 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -20,7 +20,7 @@ RUN apt-get update \
&& apt-get install -y git python-setuptools python-pip curl apt-transport-https \
&& apt-get -y autoremove && apt-get clean \
&& pip install --upgrade pip \
- && python -m pip install grpcio argparse
+ && python -m pip install grpcio argparse protobuf
# Fetch source code
RUN mkdir -p ${REPOS_DIR}
diff --git a/docs/development/design/logging.rst b/docs/development/design/logging.rst
index 05f3f5b..2c2dfe6 100644
--- a/docs/development/design/logging.rst
+++ b/docs/development/design/logging.rst
@@ -9,7 +9,27 @@ Installation
Currently, we use the `sample configuration`_ in Istio to install fluentd::
cd clover/logging
- kubectl apply -f install
+
+First, install logging stack Elasticsearch, Fluentd and Kibana::
+
+ kubectl apply -f install/logging-stack.yaml
+
+Note that this must be done in separate steps. If you run ``kubectl apply -f
+install`` instead, the mixer adapter may fail to initialize because the target
+service cannot be found. You may find an error message from mixer container::
+
+ 2018-05-09T02:43:14.435156Z error Unable to initialize adapter:
+ snapshot='6', handler='handler.fluentd.istio-system', adapter='fluentd',
+ err='adapter instantiation error: dial tcp: lookup fluentd-es.logging on
+ 10.96.0.10:53: no such host'.
+
+Then configure fluentd for istio::
+
+ kubectl apply -f install/fluentd-istio.yaml
+
+Configure fluentd for node level logging::
+
+ kubectl apply -f install/fluentd-daemonset-elasticsearch-rbac.yaml
.. _sample configuration: https://istio.io/docs/tasks/telemetry/fluentd.html
@@ -50,14 +70,14 @@ Istio defines when to log by creating a custom resource ``rule``. For example:
apiVersion: "config.istio.io/v1alpha2"
kind: rule
metadata:
- name: newlogtofluentd
- namespace: istio-system
+ name: newlogtofluentd
+ namespace: istio-system
spec:
- match: "true" # match for all requests
- actions:
- - handler: handler.fluentd
- instances:
- - newlog.logentry
+ match: "true" # match for all requests
+ actions:
+ - handler: handler.fluentd
+ instances:
+ - newlog.logentry
This rule specifies that all instances of ``newlog.logentry`` that matches the
expression will be handled by the specified handler ``handler.fluentd``. We
diff --git a/docs/release/configguide/sdc_config_guide.rst b/docs/release/configguide/sdc_config_guide.rst
index be0c86e..b95b6cf 100644
--- a/docs/release/configguide/sdc_config_guide.rst
+++ b/docs/release/configguide/sdc_config_guide.rst
@@ -314,7 +314,7 @@ following command:
istio-system jaeger-deployment NodePort 10.105.94.85 <none> 16686:32174/TCP
istio-system prometheus NodePort 10.97.74.230 <none> 9090:32708/TCP
-In the example above, the Jaeger tracing web-based UI will be available on port 32171 and
+In the example above, the Jaeger tracing web-based UI will be available on port 32174 and
the Prometheus monitoring UI on port 32708. In your browser, navigate to the following
URLs for Jaeger and Prometheus respectively::
diff --git a/samples/services/nginx/docker/subservices/lb/Dockerfile b/samples/services/nginx/docker/subservices/lb/Dockerfile
index 125da0b..3b13631 100644
--- a/samples/services/nginx/docker/subservices/lb/Dockerfile
+++ b/samples/services/nginx/docker/subservices/lb/Dockerfile
@@ -20,7 +20,7 @@ RUN \
python-pip \
&& \
# Install required python packages
- python -m pip install grpcio redis jinja2
+ python -m pip install grpcio redis jinja2 protobuf
COPY /process /process
COPY /grpc /grpc
diff --git a/samples/services/nginx/docker/subservices/proxy/Dockerfile b/samples/services/nginx/docker/subservices/proxy/Dockerfile
index 0f061a5..b063de7 100644
--- a/samples/services/nginx/docker/subservices/proxy/Dockerfile
+++ b/samples/services/nginx/docker/subservices/proxy/Dockerfile
@@ -20,7 +20,7 @@ RUN \
python-pip \
&& \
# Install required python packages
- python -m pip install grpcio redis jinja2
+ python -m pip install grpcio redis jinja2 protobuf
COPY /process /process
COPY /grpc /grpc
diff --git a/samples/services/nginx/docker/subservices/server/Dockerfile b/samples/services/nginx/docker/subservices/server/Dockerfile
index 8bf9449..434a8d4 100644
--- a/samples/services/nginx/docker/subservices/server/Dockerfile
+++ b/samples/services/nginx/docker/subservices/server/Dockerfile
@@ -20,7 +20,7 @@ RUN \
python-pip \
&& \
# Install required python packages
- python -m pip install grpcio redis jinja2
+ python -m pip install grpcio redis jinja2 protobuf
COPY /process /process
COPY /grpc /grpc
diff --git a/samples/services/snort_ids/docker/Dockerfile b/samples/services/snort_ids/docker/Dockerfile
index 50686ed..697acb9 100644
--- a/samples/services/snort_ids/docker/Dockerfile
+++ b/samples/services/snort_ids/docker/Dockerfile
@@ -70,7 +70,7 @@ RUN \
ln -s libdnet.so.1.0.1 libdnet.1 \
&& \
# Install required python libraries
- python -m pip install grpcio redis idstools
+ python -m pip install grpcio redis idstools protobuf
COPY /process /process
COPY /grpc /grpc
diff --git a/xci-k8s-setup.sh b/xci-k8s-setup.sh
new file mode 100755
index 0000000..b08be7a
--- /dev/null
+++ b/xci-k8s-setup.sh
@@ -0,0 +1,40 @@
#!/bin/bash

# Bootstrap a Kubernetes cluster for Clover via OPNFV releng-xci.
# Aborts on any error and traces every command for debuggability.
set -e
set -x

# Absolute path of the directory containing this script.
CLOVER_DIR=`cd ${BASH_SOURCE[0]%/*}/;pwd`
export CLOVER_DIR

# Set the variable for deploying k8s
# (caller-provided values win; these are the minimal-footprint defaults)
export XCI_FLAVOR=${XCI_FLAVOR:-mini}
export INSTALLER_TYPE=${INSTALLER_TYPE:-kubespray}
export DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-k8-flannel-nofeature}

# xci-deploy refuses root; individual privileged steps use sudo instead.
if [[ $(whoami) == "root" ]]; then
    echo "ERROR: This script should not be run as root!"
    exit 1
fi

# Start from a clean scratch directory on every run.
WORK_DIR=${CLOVER_DIR}/work
sudo rm -rf $WORK_DIR
mkdir $WORK_DIR

# If SSH key doesn't exist generate an SSH key in $HOME/.ssh/
# (xci uses it to reach the provisioned hosts)
[[ ! -d "$HOME/.ssh/" ]] && mkdir $HOME/.ssh/
[[ ! -f "$HOME/.ssh/id_rsa" ]] && ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""

sudo apt-get update
sudo apt-get install git python-pip -y

git clone https://gerrit.opnfv.org/gerrit/releng-xci $WORK_DIR/releng-xci

cd $WORK_DIR/releng-xci/xci

# Sourced (not executed) so exported variables like OPNFV_HOST_IP remain
# available below.
source xci-deploy.sh

# Extract the k8s master address from the kubeconfig on the OPNFV host.
MASTER_IP=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config | awk '{print \$2}' |awk -F '[:/]' '{print \$4}'")
echo "----------------------------------------"
echo "Info: You can login the Kubernetes Cluster master host"
echo "ssh root@$MASTER_IP"
echo "----------------------------------------"