summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/clear.go2
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/clear_visibility.go7
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/create.go4
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/create_docker_registry.go16
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/create_idsrules.go17
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/create_kubernetes.go18
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/create_testplan.go22
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/delete.go4
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/delete_docker_registry.go11
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/delete_kubernetes.go11
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/get.go4
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/get_docker_registry.go6
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/get_kubernetes.go6
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/get_services.go2
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/get_testresult.go47
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/get_visibility.go11
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/init.go2
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/init_visibility.go10
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/provider.go6
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/root.go58
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/set_nginx.go19
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/set_visibility.go16
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/start.go4
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/start_ids.go11
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/start_testplan.go23
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/start_visibility.go13
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/stop.go4
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/stop_ids.go9
-rw-r--r--clover/cloverctl/src/cloverctl/cmd/stop_visibility.go7
-rw-r--r--clover/cloverctl/src/cloverctl/yaml/jmeter_testplan.yaml7
-rw-r--r--clover/clovisor/Dockerfile3
-rw-r--r--clover/controller/control/api/jmeter.py9
-rw-r--r--clover/controller/control/templates/request_counts.html21
-rwxr-xr-xclover/spark/docker/clover-spark/build.sh2
-rwxr-xr-xclover/spark/docker/spark-submit/runner.sh2
-rwxr-xr-xclover/spark/docker/spark-submit/runner_fast.sh2
-rw-r--r--clover/spark/src/main/scala/CloverSlow.scala50
-rw-r--r--clover/tools/jmeter/jmeter-master/grpc/jmeter.proto6
-rw-r--r--clover/tools/jmeter/jmeter-master/grpc/jmeter_pb2.py34
-rw-r--r--clover/tools/jmeter/jmeter-master/grpc/jmeter_server.py3
-rw-r--r--clover/tools/jmeter/jmeter-master/tests/jmx.template12
-rw-r--r--docs/development/design/clovisor.rst205
-rw-r--r--docs/development/design/index.rst1
-rw-r--r--docs/release/configguide/clovisor_config_guide.rst156
-rw-r--r--docs/release/configguide/controller_services_config_guide.rst181
-rw-r--r--docs/release/configguide/index.rst14
-rw-r--r--docs/release/configguide/jmeter_config_guide.rst298
-rw-r--r--docs/release/release-notes/release-notes.rst45
-rw-r--r--docs/release/userguide/index.rst8
-rw-r--r--docs/release/userguide/userguide.rst57
-rw-r--r--download/cloverctl.tar.gzbin0 -> 10050633 bytes
51 files changed, 1253 insertions, 233 deletions
diff --git a/clover/cloverctl/src/cloverctl/cmd/clear.go b/clover/cloverctl/src/cloverctl/cmd/clear.go
index 309df70..eab784a 100644
--- a/clover/cloverctl/src/cloverctl/cmd/clear.go
+++ b/clover/cloverctl/src/cloverctl/cmd/clear.go
@@ -17,7 +17,7 @@ var clearCmd = &cobra.Command{
Short: "Truncate visibility tables",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("clear called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/clear_visibility.go b/clover/cloverctl/src/cloverctl/cmd/clear_visibility.go
index 2ad43f1..2e66637 100644
--- a/clover/cloverctl/src/cloverctl/cmd/clear_visibility.go
+++ b/clover/cloverctl/src/cloverctl/cmd/clear_visibility.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -16,7 +17,7 @@ import (
var visibilityclearCmd = &cobra.Command{
Use: "visibility",
- Short: "Clear visibility tables",
+ Short: "Clear visibility data",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
clearCollector()
@@ -28,12 +29,14 @@ func init() {
}
func clearCollector() {
+ checkControllerIP()
url := controllerIP + "/visibility/clear"
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/create.go b/clover/cloverctl/src/cloverctl/cmd/create.go
index 3a09fa4..a66acf9 100644
--- a/clover/cloverctl/src/cloverctl/cmd/create.go
+++ b/clover/cloverctl/src/cloverctl/cmd/create.go
@@ -14,10 +14,10 @@ import (
var createCmd = &cobra.Command{
Use: "create",
- Short: "Create resources including IDS rules, L7 testplans, etc.",
+ Short: "Create clover configurations and deployments",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("create called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/create_docker_registry.go b/clover/cloverctl/src/cloverctl/cmd/create_docker_registry.go
index 77045f6..37e8aeb 100644
--- a/clover/cloverctl/src/cloverctl/cmd/create_docker_registry.go
+++ b/clover/cloverctl/src/cloverctl/cmd/create_docker_registry.go
@@ -10,7 +10,7 @@ package cmd
import (
"fmt"
"io/ioutil"
-
+ "os"
"gopkg.in/resty.v1"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
@@ -28,21 +28,24 @@ var dockerregistryCmd = &cobra.Command{
func init() {
providercreateCmd.AddCommand(dockerregistryCmd)
- dockerregistryCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file to add kubernetes provider")
+ dockerregistryCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input yaml file to add docker registry")
dockerregistryCmd.MarkFlagRequired("file")
}
func createDockerRegistry() {
+ checkControllerIP()
url := controllerIP + "/halyard/addregistry"
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
- fmt.Println("Please specify a valid rule definition yaml file")
- return
+ fmt.Println("Please specify a valid yaml file")
+ os.Exit(1)
}
out_json, err := yaml.YAMLToJSON(in)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
resp, err := resty.R().
@@ -50,7 +53,8 @@ func createDockerRegistry() {
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
diff --git a/clover/cloverctl/src/cloverctl/cmd/create_idsrules.go b/clover/cloverctl/src/cloverctl/cmd/create_idsrules.go
index bc0d8d5..6e59297 100644
--- a/clover/cloverctl/src/cloverctl/cmd/create_idsrules.go
+++ b/clover/cloverctl/src/cloverctl/cmd/create_idsrules.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"io/ioutil"
"gopkg.in/resty.v1"
"github.com/ghodss/yaml"
@@ -18,7 +19,7 @@ import (
var idsrulesCmd = &cobra.Command{
Use: "idsrules",
- Short: "Create one or many IDS rules from yaml file",
+ Short: "Create one or many snort IDS rules from yaml file",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
createIDSRules()
@@ -27,30 +28,32 @@ var idsrulesCmd = &cobra.Command{
func init() {
createCmd.AddCommand(idsrulesCmd)
- idsrulesCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file to add IDS rules")
+ idsrulesCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input yaml file to add IDS rules")
idsrulesCmd.MarkFlagRequired("file")
}
func createIDSRules() {
+ checkControllerIP()
url := controllerIP + "/snort/addrule"
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
fmt.Println("Please specify a valid rule definition yaml file")
- return
+ os.Exit(1)
}
out_json, err := yaml.YAMLToJSON(in)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
- //fmt.Println(string(out_json))
-
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/create_kubernetes.go b/clover/cloverctl/src/cloverctl/cmd/create_kubernetes.go
index 7311090..8ff4394 100644
--- a/clover/cloverctl/src/cloverctl/cmd/create_kubernetes.go
+++ b/clover/cloverctl/src/cloverctl/cmd/create_kubernetes.go
@@ -12,7 +12,7 @@ import (
"time"
"io/ioutil"
"strings"
-
+ "os"
"gopkg.in/resty.v1"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
@@ -33,7 +33,7 @@ type DockerRegistry struct {
var kubeproviderCmd = &cobra.Command{
Use: "kubernetes",
- Short: "Add one kubernete provider from yaml file to spinnaker",
+ Short: "Add one kubernetes provider from yaml file to spinnaker",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
createProvider()
@@ -42,17 +42,19 @@ var kubeproviderCmd = &cobra.Command{
func init() {
providercreateCmd.AddCommand(kubeproviderCmd)
- kubeproviderCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file to add kubernetes provider")
+ kubeproviderCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input yaml file to add kubernetes provider")
kubeproviderCmd.MarkFlagRequired("file")
}
func createProvider() {
+ checkControllerIP()
url := controllerIP + "/halyard/addkube"
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
- fmt.Println("Please specify a valid rule definition yaml file")
- return
+ fmt.Println("Please specify a valid yaml file")
+ os.Exit(1)
}
t := Kubernetes{}
@@ -73,14 +75,16 @@ func createProvider() {
newconfig, _ := yaml.Marshal(&t)
out_json, err := yaml.YAMLToJSON(newconfig)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
diff --git a/clover/cloverctl/src/cloverctl/cmd/create_testplan.go b/clover/cloverctl/src/cloverctl/cmd/create_testplan.go
index 686d5ba..1d9d8b2 100644
--- a/clover/cloverctl/src/cloverctl/cmd/create_testplan.go
+++ b/clover/cloverctl/src/cloverctl/cmd/create_testplan.go
@@ -9,49 +9,51 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"io/ioutil"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
+
)
var testplanCmd = &cobra.Command{
Use: "testplan",
- Short: "Create L7 client emulation test plans from yaml file",
+ Short: "Create jmeter L7 client emulation test plan from yaml file",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
createTestPlan()
- //fmt.Printf("%v\n", cmd.Parent().CommandPath())
},
}
func init() {
createCmd.AddCommand(testplanCmd)
- testplanCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file with test plan params")
+ testplanCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input test plan yaml file")
testplanCmd.MarkFlagRequired("file")
}
func createTestPlan() {
+ checkControllerIP()
url := controllerIP + "/jmeter/gen"
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
- fmt.Println("Please specify a valid test plan yaml file")
- return
+ fmt.Println("Please specify a valid yaml file")
+ os.Exit(1)
}
out_json, err := yaml.YAMLToJSON(in)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
- //fmt.Println(string(out_json))
-
}
-
diff --git a/clover/cloverctl/src/cloverctl/cmd/delete.go b/clover/cloverctl/src/cloverctl/cmd/delete.go
index 742d769..34070e7 100644
--- a/clover/cloverctl/src/cloverctl/cmd/delete.go
+++ b/clover/cloverctl/src/cloverctl/cmd/delete.go
@@ -14,10 +14,10 @@ import (
var deleteCmd = &cobra.Command{
Use: "delete",
- Short: "Delete resources including clover-system services",
+ Short: "Delete configurations and clover services",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("delete called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/delete_docker_registry.go b/clover/cloverctl/src/cloverctl/cmd/delete_docker_registry.go
index d4403a5..3bb411a 100644
--- a/clover/cloverctl/src/cloverctl/cmd/delete_docker_registry.go
+++ b/clover/cloverctl/src/cloverctl/cmd/delete_docker_registry.go
@@ -10,14 +10,14 @@ package cmd
import (
"fmt"
"encoding/json"
-
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
var deldockerproviderCmd = &cobra.Command{
Use: "docker-registry",
- Short: "delete one docker registry provider by name from spinnaker",
+ Short: "Delete one docker registry provider by name from spinnaker",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
deldockerProvider()
@@ -26,12 +26,14 @@ var deldockerproviderCmd = &cobra.Command{
func init() {
providerdelCmd.AddCommand(deldockerproviderCmd)
- deldockerproviderCmd.Flags().StringVarP(&name, "name", "n", "", "Input docker-registry account name")
+ deldockerproviderCmd.Flags().StringVarP(&name, "name", "n", "",
+ "Input docker-registry account name")
deldockerproviderCmd.MarkFlagRequired("name")
}
func deldockerProvider() {
+ checkControllerIP()
url := controllerIP + "/halyard/delprovider"
var in = map[string]string{"name": name, "provider":"dockerRegistry"}
@@ -45,7 +47,8 @@ func deldockerProvider() {
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
diff --git a/clover/cloverctl/src/cloverctl/cmd/delete_kubernetes.go b/clover/cloverctl/src/cloverctl/cmd/delete_kubernetes.go
index 77b466a..a6f29f9 100644
--- a/clover/cloverctl/src/cloverctl/cmd/delete_kubernetes.go
+++ b/clover/cloverctl/src/cloverctl/cmd/delete_kubernetes.go
@@ -10,7 +10,7 @@ package cmd
import (
"fmt"
"encoding/json"
-
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -18,7 +18,7 @@ import (
var name string
var delkubeproviderCmd = &cobra.Command{
Use: "kubernetes",
- Short: "delete one kubernete provider by name from spinnaker",
+ Short: "Delete one kubernetes provider by name from spinnaker",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
delProvider()
@@ -27,12 +27,14 @@ var delkubeproviderCmd = &cobra.Command{
func init() {
providerdelCmd.AddCommand(delkubeproviderCmd)
- delkubeproviderCmd.Flags().StringVarP(&name, "name", "n", "", "Input kubernetes account name")
+ delkubeproviderCmd.Flags().StringVarP(&name, "name", "n", "",
+ "Input kubernetes account name")
delkubeproviderCmd.MarkFlagRequired("name")
}
func delProvider() {
+ checkControllerIP()
url := controllerIP + "/halyard/delprovider"
var in = map[string]string{"name": name, "provider":"kubernetes"}
@@ -46,7 +48,8 @@ func delProvider() {
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
diff --git a/clover/cloverctl/src/cloverctl/cmd/get.go b/clover/cloverctl/src/cloverctl/cmd/get.go
index ae3d98e..c8493be 100644
--- a/clover/cloverctl/src/cloverctl/cmd/get.go
+++ b/clover/cloverctl/src/cloverctl/cmd/get.go
@@ -14,10 +14,10 @@ import (
var getCmd = &cobra.Command{
Use: "get",
- Short: "Get information about a resource",
+ Short: "Get information about a configuration or deployment",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("get called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/get_docker_registry.go b/clover/cloverctl/src/cloverctl/cmd/get_docker_registry.go
index 93c1b3e..7ae94c9 100644
--- a/clover/cloverctl/src/cloverctl/cmd/get_docker_registry.go
+++ b/clover/cloverctl/src/cloverctl/cmd/get_docker_registry.go
@@ -11,7 +11,7 @@ import (
"fmt"
"strings"
"encoding/json"
-
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -31,6 +31,7 @@ func init() {
}
func getdocker() {
+ checkControllerIP()
url := controllerIP + "/halyard/account"
var provider = map[string]string{"name": "dockerRegistry"}
@@ -44,7 +45,8 @@ func getdocker() {
SetBody(out_json).
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
if resp.StatusCode() != 200 {
fmt.Printf("\n%v\n", resp)
diff --git a/clover/cloverctl/src/cloverctl/cmd/get_kubernetes.go b/clover/cloverctl/src/cloverctl/cmd/get_kubernetes.go
index 16dcca1..bd5875c 100644
--- a/clover/cloverctl/src/cloverctl/cmd/get_kubernetes.go
+++ b/clover/cloverctl/src/cloverctl/cmd/get_kubernetes.go
@@ -11,7 +11,7 @@ import (
"fmt"
"strings"
"encoding/json"
-
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -31,6 +31,7 @@ func init() {
}
func getkube() {
+ checkControllerIP()
url := controllerIP + "/halyard/account"
var provider = map[string]string{"name": "kubernetes"}
@@ -44,7 +45,8 @@ func getkube() {
SetBody(out_json).
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
if resp.StatusCode() != 200 {
fmt.Printf("\n%v\n", resp)
diff --git a/clover/cloverctl/src/cloverctl/cmd/get_services.go b/clover/cloverctl/src/cloverctl/cmd/get_services.go
index cfa56bd..bab13ce 100644
--- a/clover/cloverctl/src/cloverctl/cmd/get_services.go
+++ b/clover/cloverctl/src/cloverctl/cmd/get_services.go
@@ -14,7 +14,7 @@ import (
var servicesCmd = &cobra.Command{
Use: "services",
- Short: "Get info on Kubernetes services",
+ Short: "Get listing of Kubernetes services",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
cloverkube.GetServices()
diff --git a/clover/cloverctl/src/cloverctl/cmd/get_testresult.go b/clover/cloverctl/src/cloverctl/cmd/get_testresult.go
index 12d47c3..f9d8e6d 100644
--- a/clover/cloverctl/src/cloverctl/cmd/get_testresult.go
+++ b/clover/cloverctl/src/cloverctl/cmd/get_testresult.go
@@ -9,37 +9,54 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
-var JmeterResult string
-
var testresultCmd = &cobra.Command{
Use: "testresult",
- Short: "Get test results from L7 client emulation",
+ Short: "Get test results from jmeter L7 client emulation",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- getResult()
+ getResult("log")
},
}
-func init() {
- getCmd.AddCommand(testresultCmd)
- testresultCmd.Flags().StringVarP(&JmeterResult, "r", "r", "", "Result to retrieve - use 'log' or 'results'")
- testresultCmd.MarkFlagRequired("r")
+var log_testresultCmd = &cobra.Command{
+ Use: "log",
+ Short: "Get jmeter summary log results",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ getResult("log")
+ },
+}
+var detail_testresultCmd = &cobra.Command{
+ Use: "detail",
+ Short: "Get jmeter detailed results",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ getResult("detail")
+ },
}
+func init() {
+ getCmd.AddCommand(testresultCmd)
+ testresultCmd.AddCommand(log_testresultCmd)
+ testresultCmd.AddCommand(detail_testresultCmd)
+}
-func getResult() {
- switch JmeterResult {
- case "results":
+func getResult(result_type string) {
+ checkControllerIP()
+ switch result_type {
+ case "detail":
url := controllerIP + "/jmeter/results/results"
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\nResponse Body: %v\n", resp)
case "log":
@@ -47,10 +64,12 @@ func getResult() {
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\nResponse Body: %v\n", resp)
default:
- fmt.Println("Unrecoginized jmeter result type - use 'log' or 'results'")
+ msg := "Unrecognized jmeter result type"
+ fmt.Printf("%s - use 'log' or 'detail'", msg)
}
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/get_visibility.go b/clover/cloverctl/src/cloverctl/cmd/get_visibility.go
index 820b25a..a701164 100644
--- a/clover/cloverctl/src/cloverctl/cmd/get_visibility.go
+++ b/clover/cloverctl/src/cloverctl/cmd/get_visibility.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -27,12 +28,15 @@ var visibilitygetCmd = &cobra.Command{
func init() {
getCmd.AddCommand(visibilitygetCmd)
- visibilitygetCmd.PersistentFlags().StringVarP(&VisibilityStat, "stat", "s", "", "Visibility stats type to get")
- visibilitygetCmd.PersistentFlags().StringVarP(&VisibilityConfig, "conf", "c", "", "Visibility config type to get")
+ visibilitygetCmd.PersistentFlags().StringVarP(&VisibilityStat, "stat", "s",
+ "", "Visibility stats type to get")
+ visibilitygetCmd.PersistentFlags().StringVarP(&VisibilityConfig, "conf",
+ "c", "", "Visibility config type to get")
}
func getVisibility() {
+ checkControllerIP()
url_prefix := "/visibility/get/"
get_data := "all"
response_prefix := "Config"
@@ -50,7 +54,8 @@ func getVisibility() {
SetHeader("Accept", "application/json").
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%s %s: %v\n", response_prefix, get_data, resp)
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/init.go b/clover/cloverctl/src/cloverctl/cmd/init.go
index 613b263..102c5ba 100644
--- a/clover/cloverctl/src/cloverctl/cmd/init.go
+++ b/clover/cloverctl/src/cloverctl/cmd/init.go
@@ -17,7 +17,7 @@ var initCmd = &cobra.Command{
Short: "Initialize visibility schemas",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("init called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/init_visibility.go b/clover/cloverctl/src/cloverctl/cmd/init_visibility.go
index ac9ec5c..004c300 100644
--- a/clover/cloverctl/src/cloverctl/cmd/init_visibility.go
+++ b/clover/cloverctl/src/cloverctl/cmd/init_visibility.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -16,7 +17,7 @@ import (
var visibilityinitCmd = &cobra.Command{
Use: "visibility",
- Short: "Init visibility data schemas",
+ Short: "Initialize visibility data schemas in cassandra",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
initCollector()
@@ -28,14 +29,15 @@ func init() {
}
func initCollector() {
+
+ checkControllerIP()
url := controllerIP + "/collector/init"
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
}
-
-
diff --git a/clover/cloverctl/src/cloverctl/cmd/provider.go b/clover/cloverctl/src/cloverctl/cmd/provider.go
index fc8e888..e6f1cc8 100644
--- a/clover/cloverctl/src/cloverctl/cmd/provider.go
+++ b/clover/cloverctl/src/cloverctl/cmd/provider.go
@@ -17,7 +17,7 @@ var providercreateCmd = &cobra.Command{
Short: "Add spinnaker provider",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("provider called")
+ fmt.Println("Incomplete command")
},
}
@@ -26,7 +26,7 @@ var providerdelCmd = &cobra.Command{
Short: "Delete spinnaker provider",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("provider called")
+ fmt.Println("Incomplete command")
},
}
@@ -35,7 +35,7 @@ var providergetCmd = &cobra.Command{
Short: "Get spinnaker provider",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("provider called")
+ fmt.Println("Incomplete command")
},
}
func init() {
diff --git a/clover/cloverctl/src/cloverctl/cmd/root.go b/clover/cloverctl/src/cloverctl/cmd/root.go
index 6878077..d1d9405 100644
--- a/clover/cloverctl/src/cloverctl/cmd/root.go
+++ b/clover/cloverctl/src/cloverctl/cmd/root.go
@@ -10,6 +10,7 @@ package cmd
import (
"fmt"
"os"
+ "strings"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
@@ -18,7 +19,6 @@ import (
)
var cfgFile string
-
var controllerIP string
var cloverFile string
@@ -33,8 +33,9 @@ var rootCmd = &cobra.Command{
//},
}
-// Execute adds all child commands to the root command and sets flags appropriately.
-// This is called by main.main(). It only needs to happen once to the rootCmd.
+// Execute adds all child commands to the root command and sets flags
+// appropriately. This is called by main.main(). It only needs to happen
+// once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
@@ -48,19 +49,12 @@ func init() {
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
- rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cloverctl.yaml)")
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "",
+ "config file (default is $HOME/.cloverctl.yaml)")
// Cobra also supports local flags, which will only run
// when this action is called directly.
rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-
- cPort, cIP := cloverkube.GetServicesPortIP("clover-controller")
- if cIP == "" {
- controllerIP = "http://10.244.0.1:" + fmt.Sprint(cPort)
- } else {
- controllerIP = "http://" + cIP
- }
- fmt.Printf("\nclover-controller: %s %s\n", fmt.Sprint(cPort), cIP)
}
// initConfig reads in config file and ENV variables if set.
@@ -76,15 +70,51 @@ func initConfig() {
os.Exit(1)
}
- // Search config in home directory with name ".cloverctl" (without extension).
+ // Search config in home directory with name ".cloverctl"
viper.AddConfigPath(home)
viper.SetConfigName(".cloverctl")
}
viper.AutomaticEnv() // read in environment variables that match
- // If a config file is found, read it in.
+ cip_file := ""
+ // If a config file is found in home, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
+ cip_file = viper.GetString("ControllerIP")
+ } else {
+ // Check for file in path from cloverctl
+ ep, err := os.Executable()
+ if err == nil {
+ exe_path := strings.Replace(ep, "cloverctl", "", -1)
+ viper.AddConfigPath(exe_path)
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ cip_file = viper.GetString("ControllerIP")
+ }
+ }
+ }
+
+ cPort, cip_kube := cloverkube.GetServicesPortIP("clover-controller")
+ // If IP in file
+ if cip_file != "" {
+ // Nodeport
+ controllerIP = "http://" + cip_file + ":" + fmt.Sprint(cPort)
+ }
+ // Override IP, if LB IP found
+ if cip_kube != "" {
+ fmt.Printf("IP %v", cip_kube)
+ controllerIP = "http://" + cip_kube
+ }
+}
+
+func checkControllerIP() {
+ // controllerIP exists
+ service := "clover-controller"
+ if controllerIP == "" {
+ fmt.Printf("%s address unspecified or cannot be found\n", service)
+ os.Exit(1)
+ } else {
+ fmt.Printf("%s address: %s\n", service, controllerIP)
}
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/set_nginx.go b/clover/cloverctl/src/cloverctl/cmd/set_nginx.go
index e7e65c2..6b571bb 100644
--- a/clover/cloverctl/src/cloverctl/cmd/set_nginx.go
+++ b/clover/cloverctl/src/cloverctl/cmd/set_nginx.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"io/ioutil"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
@@ -36,17 +37,19 @@ var setlbCmd = &cobra.Command{
func init() {
setCmd.AddCommand(setserverCmd)
- setserverCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file for server config")
+ setserverCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input yaml file for server config")
setserverCmd.MarkFlagRequired("file")
setCmd.AddCommand(setlbCmd)
- setlbCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file for lb config")
+ setlbCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input yaml file for lb config")
setlbCmd.MarkFlagRequired("file")
-
}
func setNginx(nginx_service string) {
+ checkControllerIP()
url := ""
if nginx_service == "server" {
url = controllerIP + "/nginx/server"
@@ -57,20 +60,20 @@ func setNginx(nginx_service string) {
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
fmt.Println("Please specify a valid yaml file")
- return
+ os.Exit(1)
}
out_json, err := yaml.YAMLToJSON(in)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
-
-
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/set_visibility.go b/clover/cloverctl/src/cloverctl/cmd/set_visibility.go
index 685b250..ec28122 100644
--- a/clover/cloverctl/src/cloverctl/cmd/set_visibility.go
+++ b/clover/cloverctl/src/cloverctl/cmd/set_visibility.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"io/ioutil"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
@@ -27,31 +28,32 @@ var setvisibilitystatsCmd = &cobra.Command{
func init() {
setCmd.AddCommand(setvisibilitystatsCmd)
- setvisibilitystatsCmd.Flags().StringVarP(&cloverFile, "file", "f", "", "Input yaml file to set visibility config")
+ setvisibilitystatsCmd.Flags().StringVarP(&cloverFile, "file", "f", "",
+ "Input yaml file to set visibility config")
setvisibilitystatsCmd.MarkFlagRequired("file")
-
}
func setCollector() {
+ checkControllerIP()
url := controllerIP + "/visibility/set"
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
fmt.Println("Please specify a valid yaml file")
- return
+ os.Exit(1)
}
out_json, err := yaml.YAMLToJSON(in)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
SetBody(out_json).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
-
-
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/start.go b/clover/cloverctl/src/cloverctl/cmd/start.go
index 741eacd..d2eb864 100644
--- a/clover/cloverctl/src/cloverctl/cmd/start.go
+++ b/clover/cloverctl/src/cloverctl/cmd/start.go
@@ -14,10 +14,10 @@ import (
var startCmd = &cobra.Command{
Use: "start",
- Short: "Start processes including tests, visibility and ingress services",
+ Short: "Start processes and tests",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("start called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/start_ids.go b/clover/cloverctl/src/cloverctl/cmd/start_ids.go
index 0f495a7..be039fa 100644
--- a/clover/cloverctl/src/cloverctl/cmd/start_ids.go
+++ b/clover/cloverctl/src/cloverctl/cmd/start_ids.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -16,8 +17,8 @@ import (
var startidsCmd = &cobra.Command{
Use: "ids",
- Short: "Start IDS process",
- Long: `Restart IDS process when adding custom rules`,
+ Short: "Start snort IDS process",
+ Long: `Restart snort IDS process when adding custom rules`,
Run: func(cmd *cobra.Command, args []string) {
startIDS()
},
@@ -29,14 +30,14 @@ func init() {
func startIDS() {
+ checkControllerIP()
url := controllerIP + "/snort/start"
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
}
-
-
diff --git a/clover/cloverctl/src/cloverctl/cmd/start_testplan.go b/clover/cloverctl/src/cloverctl/cmd/start_testplan.go
index b516ad6..9e664c0 100644
--- a/clover/cloverctl/src/cloverctl/cmd/start_testplan.go
+++ b/clover/cloverctl/src/cloverctl/cmd/start_testplan.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"strings"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
@@ -18,20 +19,21 @@ import (
var testplanstartCmd = &cobra.Command{
Use: "testplan",
- Short: "Start a test for a given test plan",
+ Short: "Start test for configured test plan",
Long: `Specify number of slaves to use with '-s' flag. Default is 0 slaves,
-which runs tests only from jmeter-master.`,
+which runs tests from jmeter-master only.`,
Run: func(cmd *cobra.Command, args []string) {
startTest()
- //fmt.Printf("%v\n", cmd.Parent().CommandPath())
},
}
var num_slaves int
func init() {
startCmd.AddCommand(testplanstartCmd)
- testplanstartCmd.PersistentFlags().StringVarP(&cloverFile, "file", "f", "", "Currently unused")
- testplanstartCmd.PersistentFlags().IntVarP(&num_slaves, "slaves", "s", 0, "Number of slaves to use")
+ testplanstartCmd.PersistentFlags().StringVarP(&cloverFile, "file", "f", "",
+ "Currently unused")
+ testplanstartCmd.PersistentFlags().IntVarP(&num_slaves, "slaves", "s", 0,
+ "Number of slaves to use")
}
func startTest() {
@@ -39,21 +41,24 @@ func startTest() {
ips := cloverkube.GetPodsIP("clover-jmeter-slave", "default")
fmt.Printf("\njmeter-slaves found: %s\n", ips)
if num_slaves > len(ips) {
- fmt.Printf("Number of slaves specified must be less than found: %d\n", len(ips))
+ fmt.Printf("Number of slaves specified must be less than found: %d\n",
+ len(ips))
return
}
ip_list := strings.Join(ips[0:num_slaves], ",")
+ checkControllerIP()
url := controllerIP + "/jmeter/start"
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
- SetBody(fmt.Sprintf(`{"num_slaves":"%d", "slave_list":"%s"}`, num_slaves, ip_list)).
+ SetBody(fmt.Sprintf(`{"num_slaves":"%d", "slave_list":"%s"}`, num_slaves,
+ ip_list)).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
-
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/start_visibility.go b/clover/cloverctl/src/cloverctl/cmd/start_visibility.go
index 18f8aac..bbc25d8 100644
--- a/clover/cloverctl/src/cloverctl/cmd/start_visibility.go
+++ b/clover/cloverctl/src/cloverctl/cmd/start_visibility.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"io/ioutil"
"gopkg.in/resty.v1"
"github.com/ghodss/yaml"
@@ -18,7 +19,7 @@ import (
var visibilitystartCmd = &cobra.Command{
Use: "visibility",
- Short: "Start visibility data collection",
+ Short: "Start visibility collector process",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
startCollector()
@@ -36,23 +37,27 @@ func startCollector() {
if cloverFile != "" {
in, err := ioutil.ReadFile(cloverFile)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot read file: %v\n", err)
+ os.Exit(1)
}
out_json, err := yaml.YAMLToJSON(in)
message_body = string(out_json)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Invalid yaml: %v\n", err)
+ os.Exit(1)
}
} else {
message_body = `{"sample_interval":"10", "t_port":"80", "t_host":"jaeger-query.istio-system"}`
}
+ checkControllerIP()
url := controllerIP + "/collector/start"
resp, err := resty.R().
SetHeader("Content-Type", "application/json").
SetBody(message_body).
Post(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/stop.go b/clover/cloverctl/src/cloverctl/cmd/stop.go
index cfb7245..e77a36b 100644
--- a/clover/cloverctl/src/cloverctl/cmd/stop.go
+++ b/clover/cloverctl/src/cloverctl/cmd/stop.go
@@ -14,10 +14,10 @@ import (
var stopCmd = &cobra.Command{
Use: "stop",
- Short: "Stop processes including visibility and ingress services",
+ Short: "Stop processes including visibility and sample services",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("stop called")
+ fmt.Println("Incomplete command")
},
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/stop_ids.go b/clover/cloverctl/src/cloverctl/cmd/stop_ids.go
index b39b1e9..c810d9d 100644
--- a/clover/cloverctl/src/cloverctl/cmd/stop_ids.go
+++ b/clover/cloverctl/src/cloverctl/cmd/stop_ids.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -16,8 +17,8 @@ import (
var stopidsCmd = &cobra.Command{
Use: "ids",
- Short: "Stop IDS process",
- Long: `Restart IDS process when adding custom rules`,
+ Short: "Stop snort IDS process",
+ Long: `Restart snort IDS process when adding custom rules`,
Run: func(cmd *cobra.Command, args []string) {
stopIDS()
},
@@ -29,12 +30,14 @@ func init() {
func stopIDS() {
+ checkControllerIP()
url := controllerIP + "/snort/stop"
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
}
diff --git a/clover/cloverctl/src/cloverctl/cmd/stop_visibility.go b/clover/cloverctl/src/cloverctl/cmd/stop_visibility.go
index 4233157..fda226e 100644
--- a/clover/cloverctl/src/cloverctl/cmd/stop_visibility.go
+++ b/clover/cloverctl/src/cloverctl/cmd/stop_visibility.go
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
+ "os"
"gopkg.in/resty.v1"
"github.com/spf13/cobra"
)
@@ -16,7 +17,7 @@ import (
var visibilitystopCmd = &cobra.Command{
Use: "visibility",
- Short: "Stop visibility data collection",
+ Short: "Stop visibility collector process",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
stopCollector()
@@ -29,12 +30,14 @@ func init() {
func stopCollector() {
+ checkControllerIP()
url := controllerIP + "/collector/stop"
resp, err := resty.R().
Get(url)
if err != nil {
- panic(err.Error())
+ fmt.Printf("Cannot connect to controller: %v\n", err)
+ os.Exit(1)
}
fmt.Printf("\n%v\n", resp)
}
diff --git a/clover/cloverctl/src/cloverctl/yaml/jmeter_testplan.yaml b/clover/cloverctl/src/cloverctl/yaml/jmeter_testplan.yaml
index 140e70f..92d53ea 100644
--- a/clover/cloverctl/src/cloverctl/yaml/jmeter_testplan.yaml
+++ b/clover/cloverctl/src/cloverctl/yaml/jmeter_testplan.yaml
@@ -1,11 +1,14 @@
load_spec:
- num_threads: 1
- loops: 1
+ num_threads: 5
+ loops: 2
ramp_time: 60
+ duration: 80
url_list:
- name: url1
url: http://proxy-access-control.default:9180
method: GET
+ user-agent: chrome
- name: url2
url: http://proxy-access-control.default:9180
method: GET
+ user-agent: safari
diff --git a/clover/clovisor/Dockerfile b/clover/clovisor/Dockerfile
index 4df4ee5..63375a1 100644
--- a/clover/clovisor/Dockerfile
+++ b/clover/clovisor/Dockerfile
@@ -1,6 +1,6 @@
FROM ubuntu:18.04
-ARG TARGET_KERNEL_VER
+ARG TARGET_KERNEL_VER="4.15.0-36-generic"
RUN set -ex; \
echo "deb [trusted=yes] http://repo.iovisor.org/apt/bionic bionic main" > /etc/apt/sources.list.d/iovisor.list; \
@@ -12,6 +12,7 @@ RUN set -ex; \
libelf1;
COPY . .
+COPY bin/clovisor .
RUN chmod +x clovisor
CMD ["./clovisor"]
diff --git a/clover/controller/control/api/jmeter.py b/clover/controller/control/api/jmeter.py
index 09625f5..3e8b86a 100644
--- a/clover/controller/control/api/jmeter.py
+++ b/clover/controller/control/api/jmeter.py
@@ -16,29 +16,34 @@ import logging
jmeter = Blueprint('jmeter', __name__)
grpc_port = '50054'
-pod_name = 'clover-jmeter-master'
+pod_name = 'clover-jmeter-master.default'
jmeter_grpc = pod_name + ':' + grpc_port
channel = grpc.insecure_channel(jmeter_grpc)
stub = jmeter_pb2_grpc.ControllerStub(channel)
@jmeter.route("/jmeter/gen", methods=['GET', 'POST'])
+@jmeter.route("/jmeter/create", methods=['GET', 'POST'])
def gentest():
try:
p = request.json
u_list = []
u_names = []
u_methods = []
+ u_agents = []
try:
for u in p['url_list']:
u_list.append(u['url'])
u_names.append(u['name'])
u_methods.append(u['method'])
+ u_agents.append(u['user-agent'])
url_list = pickle.dumps(u_list)
url_names = pickle.dumps(u_names)
url_methods = pickle.dumps(u_methods)
+ url_agents = pickle.dumps(u_agents)
num_threads = p['load_spec']['num_threads']
ramp_time = p['load_spec']['ramp_time']
+ duration = p['load_spec']['duration']
loops = p['load_spec']['loops']
except (KeyError, ValueError) as e:
logging.debug(e)
@@ -46,7 +51,7 @@ def gentest():
response = stub.GenTest(jmeter_pb2.ConfigJmeter(
url_list=url_list, url_names=url_names, url_methods=url_methods,
num_threads=str(num_threads), ramp_time=str(ramp_time),
- loops=str(loops)))
+ url_agents=url_agents, duration=str(duration), loops=str(loops)))
except Exception as e:
logging.debug(e)
if e.__class__.__name__ == "_Rendezvous":
diff --git a/clover/controller/control/templates/request_counts.html b/clover/controller/control/templates/request_counts.html
new file mode 100644
index 0000000..ecf458a
--- /dev/null
+++ b/clover/controller/control/templates/request_counts.html
@@ -0,0 +1,21 @@
+<div class="large-4 medium-3 cell clover-portlet small-offset-2">
+ <h5>Per Service Counts</h5>
+ <div class="span_node_id_all">
+ </div>
+</div>
+<div class="large-4 medium-3 cell clover-portlet">
+ <h5>Per URL Counts (all services)</h5>
+ <div class="span_urls_all">
+ </div>
+</div>
+
+<div class="large-4 medium-3 cell clover-portlet small-offset-2">
+ <h5>Per URL / HTTP Status Codes (all services)</h5>
+ <div class="status_codes_all">
+ </div>
+</div>
+<div class="large-4 medium-3 cell clover-portlet">
+ <h5>User-Agent Percentage</h5>
+ <div class="span_user_agents_all" id="span_user_agents_all">
+ </div>
+</div>
diff --git a/clover/spark/docker/clover-spark/build.sh b/clover/spark/docker/clover-spark/build.sh
index a1a8788..d139b35 100755
--- a/clover/spark/docker/clover-spark/build.sh
+++ b/clover/spark/docker/clover-spark/build.sh
@@ -7,7 +7,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-IMAGE_PATH=${IMAGE_PATH:-"kube1-node1:5000"}
+IMAGE_PATH=${IMAGE_PATH:-"localhost:5000"}
IMAGE_NAME=${IMAGE_NAME:-"clover-spark:latest"}
# Copy clover-spark jar first
diff --git a/clover/spark/docker/spark-submit/runner.sh b/clover/spark/docker/spark-submit/runner.sh
index b98ff32..5de3824 100755
--- a/clover/spark/docker/spark-submit/runner.sh
+++ b/clover/spark/docker/spark-submit/runner.sh
@@ -12,7 +12,7 @@
./runner_fast.sh &
IMAGE_NAME=${IMAGE_NAME:-"clover-spark:latest"}
-IMAGE_PATH=${IMAGE_PATH:-"localhost:5000"}
+IMAGE_PATH=${IMAGE_PATH:-"opnfv"}
CLASS_NAME=${CLASS_NAME:-"CloverSlow"}
JAR_NAME=${JAR_NAME:-"clover-spark_2.11-1.0.jar"}
diff --git a/clover/spark/docker/spark-submit/runner_fast.sh b/clover/spark/docker/spark-submit/runner_fast.sh
index 2381351..0a387b2 100755
--- a/clover/spark/docker/spark-submit/runner_fast.sh
+++ b/clover/spark/docker/spark-submit/runner_fast.sh
@@ -9,7 +9,7 @@
#
IMAGE_NAME=${IMAGE_NAME:-"clover-spark:latest"}
-IMAGE_PATH=${IMAGE_PATH:-"localhost:5000"}
+IMAGE_PATH=${IMAGE_PATH:-"opnfv"}
CLASS_NAME=${CLASS_NAME:-"CloverFast"}
JAR_NAME=${JAR_NAME:-"clover-spark_2.11-1.0.jar"}
diff --git a/clover/spark/src/main/scala/CloverSlow.scala b/clover/spark/src/main/scala/CloverSlow.scala
index 1866d72..c389967 100644
--- a/clover/spark/src/main/scala/CloverSlow.scala
+++ b/clover/spark/src/main/scala/CloverSlow.scala
@@ -42,38 +42,38 @@ object CloverSlow {
.config("spark.cassandra.connection.port", "9042")
.getOrCreate()
- val services = redis.smembers("visibility_services")
-
spark
.read.cassandraFormat("spans", "visibility")
.load()
.createOrReplaceTempView("curspans")
- if (distinct_url_service) {
- // Get number of distinct URLs per service (node_id)
- for (s <- services.get) {
- val service = s.get
- val perurl = spark.sql(
- s"""
- |SELECT node_id,count(distinct http_url)
- |as urls,collect_set(http_url) as values
- |FROM curspans
- |WHERE node_id LIKE '%$service%'
- |GROUP BY node_id
- """.stripMargin)
- for ((row) <- perurl.collect) {
- println(row)
- val node_id = row.get(0)
- val url_count = row.get(1)
- val url_distinct = row.getList(2).toString
- redis.hmset(service, Map("node_id" -> node_id,
- "url_count" -> url_count,
- "url_distinct" -> url_distinct))
+ for( x <- 1 to 500 ) {
+
+ val services = redis.smembers("visibility_services")
+
+ if (distinct_url_service) {
+ // Get number of distinct URLs per service (node_id)
+ for (s <- services.get) {
+ val service = s.get
+ val perurl = spark.sql(
+ s"""
+ |SELECT node_id,count(distinct http_url)
+ |as urls,collect_set(http_url) as values
+ |FROM curspans
+ |WHERE node_id LIKE '%$service%'
+ |GROUP BY node_id
+ """.stripMargin)
+ for ((row) <- perurl.collect) {
+ println(row)
+ val node_id = row.get(0)
+ val url_count = row.get(1)
+ val url_distinct = row.getList(2).toString
+ redis.hmset(service, Map("node_id" -> node_id,
+ "url_count" -> url_count,
+ "url_distinct" -> url_distinct))
+ }
}
}
- }
-
- for( x <- 1 to 500 ) {
if (response_times) {
try {
diff --git a/clover/tools/jmeter/jmeter-master/grpc/jmeter.proto b/clover/tools/jmeter/jmeter-master/grpc/jmeter.proto
index 7213faa..f65ed6c 100644
--- a/clover/tools/jmeter/jmeter-master/grpc/jmeter.proto
+++ b/clover/tools/jmeter/jmeter-master/grpc/jmeter.proto
@@ -29,8 +29,10 @@ message ConfigJmeter {
string url_names = 3;
string url_protocols = 4;
string url_methods = 5;
- string loops = 6;
- string ramp_time = 7;
+ string url_agents = 6;
+ string loops = 7;
+ string ramp_time = 8;
+ string duration = 9;
}
message JmeterReply {
diff --git a/clover/tools/jmeter/jmeter-master/grpc/jmeter_pb2.py b/clover/tools/jmeter/jmeter-master/grpc/jmeter_pb2.py
index e4a75fd..7a2d62c 100644
--- a/clover/tools/jmeter/jmeter-master/grpc/jmeter_pb2.py
+++ b/clover/tools/jmeter/jmeter-master/grpc/jmeter_pb2.py
@@ -19,7 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
name='jmeter.proto',
package='jmeter',
syntax='proto3',
- serialized_pb=_b('\n\x0cjmeter.proto\x12\x06jmeter\"F\n\nTestParams\x12\x12\n\nnum_slaves\x18\x01 \x01(\t\x12\x11\n\ttest_plan\x18\x02 \x01(\t\x12\x11\n\tslave_ips\x18\x03 \x01(\t\"\x96\x01\n\x0c\x43onfigJmeter\x12\x10\n\x08url_list\x18\x01 \x01(\t\x12\x13\n\x0bnum_threads\x18\x02 \x01(\t\x12\x11\n\turl_names\x18\x03 \x01(\t\x12\x15\n\rurl_protocols\x18\x04 \x01(\t\x12\x13\n\x0burl_methods\x18\x05 \x01(\t\x12\r\n\x05loops\x18\x06 \x01(\t\x12\x11\n\tramp_time\x18\x07 \x01(\t\"\x1e\n\x0bJmeterReply\x12\x0f\n\x07message\x18\x01 \x01(\t\",\n\x08JResults\x12\x10\n\x08r_format\x18\x01 \x01(\t\x12\x0e\n\x06r_file\x18\x02 \x01(\t2\xb3\x01\n\nController\x12\x36\n\x07GenTest\x12\x14.jmeter.ConfigJmeter\x1a\x13.jmeter.JmeterReply\"\x00\x12\x36\n\tStartTest\x12\x12.jmeter.TestParams\x1a\x13.jmeter.JmeterReply\"\x00\x12\x35\n\nGetResults\x12\x10.jmeter.JResults\x1a\x13.jmeter.JmeterReply\"\x00\x62\x06proto3')
+ serialized_pb=_b('\n\x0cjmeter.proto\x12\x06jmeter\"F\n\nTestParams\x12\x12\n\nnum_slaves\x18\x01 \x01(\t\x12\x11\n\ttest_plan\x18\x02 \x01(\t\x12\x11\n\tslave_ips\x18\x03 \x01(\t\"\xbc\x01\n\x0c\x43onfigJmeter\x12\x10\n\x08url_list\x18\x01 \x01(\t\x12\x13\n\x0bnum_threads\x18\x02 \x01(\t\x12\x11\n\turl_names\x18\x03 \x01(\t\x12\x15\n\rurl_protocols\x18\x04 \x01(\t\x12\x13\n\x0burl_methods\x18\x05 \x01(\t\x12\x12\n\nurl_agents\x18\x06 \x01(\t\x12\r\n\x05loops\x18\x07 \x01(\t\x12\x11\n\tramp_time\x18\x08 \x01(\t\x12\x10\n\x08\x64uration\x18\t \x01(\t\"\x1e\n\x0bJmeterReply\x12\x0f\n\x07message\x18\x01 \x01(\t\",\n\x08JResults\x12\x10\n\x08r_format\x18\x01 \x01(\t\x12\x0e\n\x06r_file\x18\x02 \x01(\t2\xb3\x01\n\nController\x12\x36\n\x07GenTest\x12\x14.jmeter.ConfigJmeter\x1a\x13.jmeter.JmeterReply\"\x00\x12\x36\n\tStartTest\x12\x12.jmeter.TestParams\x1a\x13.jmeter.JmeterReply\"\x00\x12\x35\n\nGetResults\x12\x10.jmeter.JResults\x1a\x13.jmeter.JmeterReply\"\x00\x62\x06proto3')
)
@@ -113,19 +113,33 @@ _CONFIGJMETER = _descriptor.Descriptor(
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='loops', full_name='jmeter.ConfigJmeter.loops', index=5,
+ name='url_agents', full_name='jmeter.ConfigJmeter.url_agents', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='ramp_time', full_name='jmeter.ConfigJmeter.ramp_time', index=6,
+ name='loops', full_name='jmeter.ConfigJmeter.loops', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='ramp_time', full_name='jmeter.ConfigJmeter.ramp_time', index=7,
+ number=8, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='duration', full_name='jmeter.ConfigJmeter.duration', index=8,
+ number=9, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -139,7 +153,7 @@ _CONFIGJMETER = _descriptor.Descriptor(
oneofs=[
],
serialized_start=97,
- serialized_end=247,
+ serialized_end=285,
)
@@ -169,8 +183,8 @@ _JMETERREPLY = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=249,
- serialized_end=279,
+ serialized_start=287,
+ serialized_end=317,
)
@@ -207,8 +221,8 @@ _JRESULTS = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=281,
- serialized_end=325,
+ serialized_start=319,
+ serialized_end=363,
)
DESCRIPTOR.message_types_by_name['TestParams'] = _TESTPARAMS
@@ -253,8 +267,8 @@ _CONTROLLER = _descriptor.ServiceDescriptor(
file=DESCRIPTOR,
index=0,
options=None,
- serialized_start=328,
- serialized_end=507,
+ serialized_start=366,
+ serialized_end=545,
methods=[
_descriptor.MethodDescriptor(
name='GenTest',
diff --git a/clover/tools/jmeter/jmeter-master/grpc/jmeter_server.py b/clover/tools/jmeter/jmeter-master/grpc/jmeter_server.py
index cef180c..9f9e561 100644
--- a/clover/tools/jmeter/jmeter-master/grpc/jmeter_server.py
+++ b/clover/tools/jmeter/jmeter-master/grpc/jmeter_server.py
@@ -37,6 +37,7 @@ class Controller(jmeter_pb2_grpc.ControllerServicer):
template_file = 'tests/jmx.template'
unames = pickle.loads(r.url_names)
umethods = pickle.loads(r.url_methods)
+ uagents = pickle.loads(r.url_agents)
ulist = []
for url in pickle.loads(r.url_list):
u = urlparse(url)
@@ -58,7 +59,9 @@ class Controller(jmeter_pb2_grpc.ControllerServicer):
num_threads=r.num_threads,
url_names=unames,
url_methods=umethods,
+ url_agents=uagents,
ramp_time=r.ramp_time,
+ duration=r.duration,
loops=r.loops,
url_list=ulist
)
diff --git a/clover/tools/jmeter/jmeter-master/tests/jmx.template b/clover/tools/jmeter/jmeter-master/tests/jmx.template
index 1a6fa95..ad3414d 100644
--- a/clover/tools/jmeter/jmeter-master/tests/jmx.template
+++ b/clover/tools/jmeter/jmeter-master/tests/jmx.template
@@ -23,7 +23,7 @@
<longProp name="ThreadGroup.start_time">1385457190000</longProp>
<longProp name="ThreadGroup.end_time">1385457190000</longProp>
<boolProp name="ThreadGroup.scheduler">true</boolProp>
- <stringProp name="ThreadGroup.duration">60</stringProp>
+ <stringProp name="ThreadGroup.duration">{{ duration }}</stringProp>
<stringProp name="ThreadGroup.delay"/>
<boolProp name="ThreadGroup.delayedStart">true</boolProp>
</ThreadGroup>
@@ -49,7 +49,17 @@
<stringProp name="HTTPSampler.embedded_url_re"/>
<stringProp name="HTTPSampler.implementation"/>
</HTTPSampler>
+ <hashTree>
+ <HeaderManager guiclass="HeaderPanel" testclass="HeaderManager" testname="HTTP Header Manager" enabled="true">
+ <collectionProp name="HeaderManager.headers">
+ <elementProp name="" elementType="Header">
+ <stringProp name="Header.name">User-Agent</stringProp>
+ <stringProp name="Header.value">{{ url_agents[loop.index0] }}</stringProp>
+ </elementProp>
+ </collectionProp>
+ </HeaderManager>
<hashTree/>
+ </hashTree>
{%- endfor %}
diff --git a/docs/development/design/clovisor.rst b/docs/development/design/clovisor.rst
new file mode 100644
index 0000000..e829bff
--- /dev/null
+++ b/docs/development/design/clovisor.rst
@@ -0,0 +1,205 @@
+########
+Clovisor
+########
+
+*****************
+What is Clovisor?
+*****************
+
+One of Clover's goals is to investigate an optimal way to perform network
+tracing in cloud native environment. Clovisor is project Clover's initial
+attempt to provide such solution.
+
+Clovisor is named due to it being "Clover's use of IOVisor". `IOVisor`_ is a
+set of tools to ease eBPF code development for tracing, monitoring, and other
+networking functions. BPF stands for Berkeley Packet Filter, an in-kernel
+virtual machine like construct which allows developers to inject bytecodes in
+various kernel event points. More information regarding BPF can be found
+`here`_. Clovisor utilizes the `goBPF`_ module from IOVisor as part of its
+control plane, and primarily uses BPF code to perform packet filtering in the
+data plane.
+
+.. _IOVisor: https://github.com/iovisor
+.. _here: https://cilium.readthedocs.io/en/v1.2/bpf/
+.. _goBPF: https://github.com/iovisor/gobpf
+
+**********************
+Clovisor Functionality
+**********************
+
+Clovisor is primarily a session based network tracing module, that is, it
+generates network traces on a per-session basis, i.e., on a request and response
+pair basis. It records information pertaining to L3/L4 and L7 (just HTTP 1.0 and
+1.1 for now) regarding the session. The traces are sent to Jaeger server who
+acts as tracer, or trace collector.
+
+********************
+Clovisor Requirement
+********************
+
+Clovisor is tested on kernel versions 4.14.x and 4.15.x. For Ubuntu servers
+built-in kernel, it requires Ubuntu version 18.04.
+
+*****************
+Clovisor Workflow
+*****************
+
+Clovisor runs as a `DaemonSet`_ --- that is, it runs on every nodes in a
+Kubernetes cluster, including being automatically launched in newly joined node.
+Clovisor runs in the "clovisor" Kubernetes namespace, and it needs to run in
+privilege mode and be granted at least pod and service readable right for the
+Kubernetes namespace(s) in which it is monitoring, i.e., a RBAC needs to be set
+up to grant such access right to the clovisor namespace service account.
+
+Clovisor looks for its configuration(s) from redis server in clover-system
+namespace. The three config info for Clovisor for now are:
+
+#. clovisor_labels, a list of labels which Clovisor would filter for monitoring
+#. clovisor_egress_match, a list of interested egress side IP/port for outbound
+ traffic monitoring
+#. clovisor_jaeger_server, specifying the Jaeger server name / port to send
+ traces to
+
+By default Clovisor would monitor all the pods under the 'default' namespace.
+It will read the service port name associated with the pod under monitoring,
+and use the service port name to determine the network protocol to trace.
+Clovisor expects the same service port naming convention / nomenclature as
+Istio, which is specified in `istio`_. Clovisor extracts expected network
+protocol from these names; some examples are
+
+.. code-block:: yaml
+
+ apiVersion: v1
+ kind: Service
+ [snip]
+ spec:
+ ports:
+ - port: 1234
+ name: http
+
+With the above example in the service specification, Clovisor would specifically
+look to trace HTTP packets for packets matching that destination port number on
+the pods associated with this service, and filter everything else. The
+following has the exact same behavior
+
+.. code-block:: yaml
+
+ apiVersion: v1
+ kind: Service
+ [snip]
+ spec:
+ ports:
+ - port: 1234
+ name: http-1234
+
+Clovisor derives what TCP port to monitor via the container port exposed by the
+pod in pod spec. In the following example:
+
+.. code-block:: yaml
+
+ spec:
+ containers:
+ - name: foo
+ image: localhost:5000/foo
+ ports:
+ - containerPort: 3456
+
+Packets with destination TCP port number 3456 will be traced for the pod on the
+ingress side, likewise for packet with source TCP port number 3456 on the
+ingress side (for receiving response traffic tracing). This request-response
+pair is sent as a `span`_.
+
+In addition, Clovisor provides egress match configuration where user can
+configure the (optional) IP address of the egress side traffic and TCP port
+number for EGRESS or outbound side packet tracing. This is particularly useful
+for the use case where the pod sends traffic to an external entity (for
+example, sending to an external web site on port 80). User can further specify
+which pod prefix should the rules be applied.
+
+Clovisor is a session-based network tracer, therefore it would trace both the
+request and response packet flow, and extract any information necessary (the
+entire packet from IP header up is copied to user space). In Gambia release
+Clovisor control plane extracts source/destination IP addresses (from request
+packet flow perspective), source/destination TCP port number, and HTTP request
+method/URL/protocol as well as response status/status code/protocol, and
+overall session duration. This information is logged via OpenTracing
+APIs to Jaeger.
+
+.. _DaemonSet: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+.. _istio: https://istio.io/docs/setup/kubernetes/spec-requirements/
+.. _span: https://github.com/opentracing/specification/blob/master/specification.md
+
+**********************
+Clovisor Control Plane
+**********************
+
+There are two main elements of Clovisor control plane: Kubernetes client and
+BPF control plane using IOVisor BCC.
+
+Kubernetes client is used for the following needs:
+
+#. fetches the pods pertaining to filter ('default' namespace by default
+ without filter)
+#. fetches corresponding service port name to determine network protocol to
+ trace (TCP by default)
+#. extracts veth interface index for pod network interface
+#. watches for pod status change, or if new pod got launched that matches the
+ filter
+
+Clovisor uses goBPF from IOVisor BCC project to build its control plane for BPF
+datapath, which does:
+
+#. via `netlink`_, under the pod veth interface on the Linux host side, creates
+ a `QDisc`_ with name 'classact' with ingress and egress filters created
+ under it
+#. dynamically compiles and loads BPF code "session_tracing.c" and sets ingress
+ and egress functions on the filters created above
+#. sets up perfMap (shared packet buffer between user space and kernel) and
+ sets up kernel channel to poll map write event
+#. sets up timer task to periodically logs and traces interested packets
+
+.. _netlink: https://github.com/vishvananda/netlink
+.. _QDisc: http://tldp.org/HOWTO/Traffic-Control-HOWTO/components.html
+
+*******************
+Clovisor Data Plane
+*******************
+
+Clovisor utilizes BPF for data plane packet analysis in kernel. BPF bytecode
+runs in kernel and is executed as an event handler. Clovisor's BPF program has
+an ingress and egress packet handling functions as loadable modules for
+respective event trigger points, i.e., ingress and egress on a particular Linux
+network interface, which for Clovisor is the pod associated veth. There are
+three tables used by the Clovisor BPF program:
+
+#. dports2proto: control plane -> data plane: the container/service port and
+ corresponding protocol (TCP, HTTP...etc) to trace on the ingress side
+#. egress_lookup_table: control plane -> data plane: the list of egress IP
+ address / ports which Clovisor should trace on the egress side
+#. sessions: data plane -> control plane: BPF creates entries to this table to
+ record TCP sessions
+
+*****************
+Clovisor Clean Up
+*****************
+
+As mentioned above, on a per pod basis, Clovisor creates a qdisc called
+'classact' per each pod veth interface. This kernel object does not get deleted
+by simply killing the Clovisor pod. The cleanup is done via Clovisor either via
+pod removal, or when the Clovisor pod is deleted. However, IF the qdisc is not
+cleaned up, Clovisor would not be able to tap into that same pod, more
+specifically, that pod veth interface. The qdisc can be examined via the
+following command::
+
+ sudo tc qdisc show
+
+and you should see something like this::
+
+ qdisc clsact ffff: dev veth4c47cc75 parent ffff:fff1
+
+in case it wasn't removed at the end, user can manually remove it via::
+
+ sudo tc qdisc del dev veth4c47cc75 clsact
+
+(of course, the qdisc should be removed by Clovisor, otherwise it is a Clovisor
+bug)
diff --git a/docs/development/design/index.rst b/docs/development/design/index.rst
index 1bb89f1..5f950b9 100644
--- a/docs/development/design/index.rst
+++ b/docs/development/design/index.rst
@@ -10,6 +10,7 @@ OPNFV Clover Design Specification
.. toctree::
:maxdepth: 1
+ clovisor
logging
monitoring
tracing
diff --git a/docs/release/configguide/clovisor_config_guide.rst b/docs/release/configguide/clovisor_config_guide.rst
new file mode 100644
index 0000000..9b5f4a3
--- /dev/null
+++ b/docs/release/configguide/clovisor_config_guide.rst
@@ -0,0 +1,156 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Authors of Clover
+
+.. _clovisor_config_guide:
+
+============================
+Clovisor Configuration Guide
+============================
+
+Clovisor requires minimal to no configurations to function as a network tracer.
+It expects configurations to be set at a redis server running at clover-system
+namespace.
+
+No Configuration
+================
+
+If redis server isn't running as service name **redis** in namespace
+**clover-system** or there isn't any configuration related to Clovisor in that
+redis service, then Clovisor would monitor all pods under the **default**
+namespace. The traces would be sent to **jaeger-collector** service under the
+**clover-system** namespace
+
+Using redis-cli
+===============
+
+Install ``redis-cli`` on the client machine, and look up redis IP address:
+
+.. code-block:: bash
+
+ $ kubectl get services -n clover-system
+
+which one may get something like the following:
+
+.. code-block:: bash
+
+ $
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ redis ClusterIP 10.109.151.40 <none> 6379/TCP 16s
+
+if (like above), the external IP isn't visible, one may be able to get the pod
+IP address directly via the pod (for example, it works with Flannel as CNI
+plugin):
+
+.. code-block:: bash
+
+ $ kubectl get pods -n clover-system -o=wide
+ NAME READY STATUS RESTARTS AGE IP NODE
+ redis 2/2 Running 0 34m 10.244.0.187 clover1804
+
+and one can connect to redis via::
+
+ redis-cli -h 10.244.0.187 -p 6379
+
+Jaeger Collector Configuration
+==============================
+
+Clovisor allows user to specify the Jaeger service for which Clovisor would send
+the network traces to. This is configured via setting the values for
+keys **clovisor_jaeger_collector** and **clovisor_jaeger_agent**::
+
+ redis> SET clovisor_jaeger_collector "jaeger-collector.istio-system:14268"
+ "OK"
+ redis> SET clovisor_jaeger_agent "jaeger-agent.istio-system:6831"
+ "OK"
+
+Configure Monitoring Namespace and Labels
+=========================================
+
+Configuration Value String Format:
+----------------------------------
+
+ <namespace>[:label-key:label-value]
+
+User can configure namespace(s) for Clovisor to tap into via adding namespace
+configuration in redis list **clovisor_labels**::
+
+ redis> LPUSH clovisor_labels "my-namespace"
+ (integer) 1
+
+the above command will cause Clovisor to **NOT** monitor the pods in **default**
+namespace, and only monitor the pods under **my-namespace**.
+
+If user wants to monitor both 'default' and 'my-namespace', she needs to
+explicitly add 'default' namespace back to the list::
+
+ redis> LPUSH clovisor_labels "default"
+ (integer) 2
+ redis> LRANGE clovisor_labels 0 -1
+ 1.) "default"
+ 2.) "my-namespace"
+
+Clovisor allows user to optionally specify which label match on pods to further
+filter the pods to monitor::
+
+ redis> LPUSH clovisor_labels "my-2nd-ns:app:database"
+ (integer) 1
+
+the above configuration would result in Clovisor only monitoring pods in
+my-2nd-ns namespace which matches the label "app:database"
+
+User can specify multiple labels to filter via adding more configuration
+entries::
+
+ redis> LPUSH clovisor_labels "my-2nd-ns:app:web"
+ (integer) 2
+ redis> LRANGE clovisor_labels 0 -1
+ 1.) "my-2nd-ns:app:web"
+ 2.) "my-2nd-ns:app:database"
+
+the result is that Clovisor would monitor pods under namespace my-2nd-ns which
+match **EITHER** app:database **OR** app:web
+
+Currently Clovisor does **NOT** support filtering of more than one label per
+filter, i.e., no configuration option to specify a case where a pod in a
+namespace needs to be matched with TWO or more labels to be monitored
+
+Configure Egress Match IP address, Port Number, and Matching Pods
+=================================================================
+
+Configuration Value String Format:
+----------------------------------
+
+ <IP Address>:<TCP Port Number>[:<Pod Name Prefix>]
+
+By default, Clovisor only traces packets that goes to a pod via its service
+port, and the response packets, i.e., from pod back to client. User can
+configure tracing packet going **OUT** of the pod to the next microservice, or
+an external service also via the **clovior_egress_match** list::
+
+ redis> LPUSH clovior_egress_match "10.0.0.1:3456"
+ (integer) 1
+
+the command above will cause Clovisor to trace packet going out of ALL pods
+under monitoring to match IP address 10.0.0.1 and destination TCP port 3456 on
+the **EGRESS** side --- that is, packets going out of the pod.
+
+User can also choose to ignore the outbound IP address, and only specify the
+port to trace via setting IP address to zero::
+
+ redis> LPUSH clovior_egress_match "0:3456"
+ (integer) 1
+
+the command above will cause Clovisor to trace packets going out of all the pods
+under monitoring that match destination TCP port 3456.
+
+User can further specify a specific pod prefix for such egress rule to be
+applied::
+
+ redis> LPUSH clovior_egress_match "0:3456:proxy"
+ (integer) 1
+
+the command above will cause Clovisor to trace packets going out of pods under
+monitoring which have name starting with the string "proxy" that match destination
+TCP port 3456
diff --git a/docs/release/configguide/controller_services_config_guide.rst b/docs/release/configguide/controller_services_config_guide.rst
new file mode 100644
index 0000000..6671458
--- /dev/null
+++ b/docs/release/configguide/controller_services_config_guide.rst
@@ -0,0 +1,181 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Authors of Clover
+
+.. _controller_services_config_guide:
+
+==============================================
+Clover Controller Services Configuration Guide
+==============================================
+
+This document provides a guide to use the Clover controller services, which are introduced in
+the Clover Gambia release.
+
+Overview
+=========
+
+Clover controller services allow users to control and access information about Clover
+microservices. Two new components are added to Clover to facilitate an ephemeral, cloud native
+workflow. A CLI interface with the name **cloverctl** interfaces to the Kubernetes (k8s)
+API and also to **clover-controller**, a microservice deployed within the k8s cluster to
+instrument other Clover k8s services including sample network services, visibility/validation
+services and supporting datastores (redis, cassandra). The **clover-controller** service
+provides message routing communicating REST with cloverctl or other API/UI interfaces and
+gRPC to internal k8s cluster microservices. It acts as an internal agent and reduces the need
+to expose multiple Clover services outside of a k8s cluster.
+
+The **clover-controller** is packaged as a docker container with manifests to deploy
+in a Kubernetes (k8s) cluster. The **cloverctl** CLI is packaged as a binary (Golang) within a
+tarball with associated yaml files that can be used to configure and control other Clover
+microservices within the k8s cluster via **clover-controller**. The **cloverctl** CLI can also
+deploy/delete other Clover services within the k8s cluster for convenience.
+
+The **clover-controller** service provides the following functions:
+
+ * **REST API:** interface allows CI scripts/automation to control sample network services,
+ visibility and validation services. Analyzed visibility data can be consumed by other
+ services with REST messaging.
+
+ * **CLI Endpoint:** acts as an endpoint for many **cloverctl** CLI commands using the
+ **clover-controller** REST API and relays messages to other services via gRPC.
+
+ * **UI Dashboard:** provides a web interface exposing visibility views to interact with
+ Clover visibility services. It presents analyzed visibility data and provides basic controls
+ such as selecting which user services visibility will track.
+
+.. image:: imgs/controller_services.png
+ :align: center
+ :scale: 100%
+
+The **cloverctl** CLI command syntax is similar to k8s kubectl or istio istioctl CLI tools, using
+a <verb> <noun> convention.
+
+Help can be accessed using the ``--help`` option, as shown below::
+
+ $ cloverctl --help
+
+Deploying Clover system services
+================================
+
+Prerequisites
+-------------
+
+The following assumptions must be met before continuing on to deployment:
+
+ * Installation of Docker has already been performed. It's preferable to install Docker CE.
+ * Installation of k8s in a single-node or multi-node cluster.
+
+.. _controller_services_cli:
+
+Download Clover CLI
+-------------------
+
+Download the cloverctl binary from the location below::
+
+ $ curl -L https://github.com/opnfv/clover/raw/stable/gambia/download/cloverctl.tar.gz | tar xz
+ $ cd cloverctl
+ $ export PATH=$PWD:$PATH
+
+To begin deploying Clover services, ensure the correct k8s context is enabled. Validate that
+the CLI can interact with the k8s API with the command::
+
+ $ cloverctl get services
+
+The command above must return a listing of the current k8s services similar to the output of
+'kubectl get svc --all-namespaces'.
+
+.. _controller_services_controller:
+
+Deploying clover-controller
+---------------------------
+
+To deploy the **clover-controller** service, use the command below:
+
+.. code-block:: bash
+
+ $ cloverctl create system controller
+
+The k8s pod listing below must include the **clover-controller** pod in the **clover-system**
+namespace:
+
+.. code-block:: bash
+
+ $ kubectl get pods --all-namespaces | grep clover-controller
+
+ NAMESPACE NAME READY STATUS
+ clover-system clover-controller-74d8596bb5-jczqz 1/1 Running
+
+Exposing clover-controller
+==========================
+
+To expose the **clover-controller** deployment outside of the k8s cluster, a k8s NodePort
+or LoadBalancer service must be employed.
+
+Using NodePort
+--------------
+
+To use a NodePort for the **clover-controller** service, use the following command::
+
+ $ cloverctl create system controller nodeport
+
+The NodePort default is to use port 32044. To modify this, edit the yaml relative
+to the **cloverctl** path at ``yaml/controller/service_nodeport.yaml`` before invoking
+the command above. Delete the ``nodePort:`` key in the yaml to let k8s select an
+available port within the range 30000-32767.
+
+Using LoadBalancer
+------------------
+
+For k8s clusters that support a LoadBalancer service, such as GKE, one can be created for
+**clover-controller** with the following command::
+
+ $ cloverctl create system controller lb
+
+Setup with cloverctl CLI
+------------------------
+
+The **cloverctl** CLI will communicate with **clover-controller** on the service exposed above
+and requires the IP address of either the load balancer or a cluster node IP address, if a
+NodePort service is used. For a LoadBalancer service, **cloverctl** will automatically find
+the IP address to use and no further action is required.
+
+However, if a NodePort service is used, an additional step is required to configure the IP
+address for **cloverctl** to target. This may be the CNI (ex. flannel/weave) IP address or the IP
+address of a k8s node interface. The **cloverctl** CLI will automatically determine the
+NodePort port number configured. To configure the IP address, create a file named
+``.cloverctl.yaml`` and add a single line to the yaml file with the following::
+
+ ControllerIP: <IP address>
+
+This file must be located in your ``HOME`` directory or in the same directory as the **cloverctl**
+binary.
+
+Uninstall from Kubernetes environment
+=====================================
+
+Delete with Clover CLI
+-----------------------
+
+When you're finished working with Clover system services, you can uninstall it with the
+following command:
+
+.. code-block:: bash
+
+ $ cloverctl delete system controller
+ $ cloverctl delete system controller nodeport # for NodePort
+ $ cloverctl delete system controller lb # for LoadBalancer
+
+
+The commands above will remove the clover-controller deployment and service resources
+created from the current k8s context.
+
+Uninstall from Docker environment
+=================================
+
+The OPNFV docker image for the **clover-controller** can be removed with the following commands
+from nodes in the k8s cluster.
+
+.. code-block:: bash
+
+ $ docker rmi opnfv/clover-controller
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
index daf8986..41c1eca 100644
--- a/docs/release/configguide/index.rst
+++ b/docs/release/configguide/index.rst
@@ -3,14 +3,20 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Authors of Clover
-.. _clover_config_guides:
+.. _clover_configguide:
-=================================
-OPNFV Clover Configuration Guides
-=================================
+==========================
+Clover Configuration Guide
+==========================
.. toctree::
:maxdepth: 2
+ controller_services_config_guide.rst
sdc_config_guide.rst
a_b_config_guide.rst
+ jmeter_config_guide.rst
+ visibility_config_guide.rst
+ modsecurity_config_guide.rst
+ spinnaker_config_guide.rst
+ clovisor_config_guide.rst
diff --git a/docs/release/configguide/jmeter_config_guide.rst b/docs/release/configguide/jmeter_config_guide.rst
new file mode 100644
index 0000000..de1d2f5
--- /dev/null
+++ b/docs/release/configguide/jmeter_config_guide.rst
@@ -0,0 +1,298 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Authors of Clover
+
+.. _jmeter_config_guide:
+
+=======================================
+JMeter Validation Configuration Guide
+=======================================
+
+This document provides a guide to use the JMeter validation service, which is introduced in
+the Clover Gambia release.
+
+Overview
+=========
+
+Apache JMeter is a mature, open source application that supports web client emulation. Its
+functionality has been integrated into the Clover project to allow various CI validations
+and performance tests to be performed. The system under test can either be REST services/APIs
+directly or a set of L7 network services. In the latter scenario, Clover nginx servers may
+be employed as an endpoint to allow traffic to be sent end-to-end across a service chain.
+
+The Clover JMeter integration is packaged as docker containers with manifests to deploy
+in a Kubernetes (k8s) cluster. The Clover CLI (**cloverctl**) can be used to configure and
+control the JMeter service within the k8s cluster via **clover-controller**.
+
+The Clover JMeter integration has the following attributes:
+
+ * **Master/Slave Architecture:** uses the native master/slave implementation of JMeter. The master
+ and slaves have distinct OPNFV docker containers for rapid deployment and usage. Slaves allow
+ the scale of the emulation to be increased linearly for performance testing. However, for
+ functional validations and modest scale, the master may be employed without any slaves.
+
+ * **Test Creation & Control:** JMeter makes use of a rich XML-based test plan. While this offers
+ a plethora of configurable options, it can be daunting for a beginner user to edit directly.
+ Clover provides an abstracted yaml syntax exposing a subset of the available configuration
+ parameters. JMeter test plans are generated on the master and tests can be started from
+ **cloverctl** CLI.
+
+ * **Result Collection:** summary log results and detailed per-request results can be retrieved
+ from the JMeter master during and after tests from the **cloverctl** or from a REST API exposed
+ via **clover-controller**.
+
+.. image:: imgs/jmeter_overview.png
+ :align: center
+ :scale: 100%
+
+Deploying Clover JMeter service
+===============================
+
+Prerequisites
+-------------
+
+The following assumptions must be met before continuing on to deployment:
+
+ * Installation of Docker has already been performed. It's preferable to install Docker CE.
+ * Installation of k8s in a single-node or multi-node cluster.
+ * Clover CLI (**cloverctl**) has been downloaded and setup. Instructions to deploy can be found
+ at :ref:`controller_services_controller`
+ * The **clover-controller** service is deployed in the k8s cluster the validation services will
+ be deployed in. Instructions to deploy can be found at :ref:`controller_services_controller`.
+
+Deploy with Clover CLI
+-----------------------
+
+The easiest way to deploy Clover JMeter validation services into your k8s cluster is to use the
+**cloverctl** CLI using the following command:
+
+.. code-block:: bash
+
+ $ cloverctl create system validation
+
+Container images with the Gambia release tag will be pulled if the tag is unspecified. The release
+tag is **opnfv-7.0.0** for the Gambia release. To deploy the latest containers from master, use
+the command shown below::
+
+ $ cloverctl create system validation -t latest
+
+The Clover CLI will add master/slave pods to the k8s cluster in the default namespace.
+
+The JMeter master/slave docker images will automatically be pulled from the OPNFV public
+Dockerhub registry. Deployments and respective services will be created with three slave
+replica pods added with the **clover-jmeter-slave** prefix. A single master pod will be
+created with the **clover-jmeter-master** prefix.
+
+Deploy from source
+------------------
+
+To continue to deploy from the source code, clone the Clover git repository and navigate
+within to the directory, as shown below:
+
+.. code-block:: bash
+
+ $ git clone https://gerrit.opnfv.org/gerrit/clover
+ $ cd clover/clover/tools/jmeter/yaml
+ $ git checkout stable/gambia
+
+To deploy the master use the following two commands, which will create a manifest with
+the Gambia release tags and creates the deployment in the k8s cluster::
+
+ $ python render_master.py --image_tag=opnfv-7.0.0 --image_path=opnfv
+ $ kubectl create -f clover-jmeter-master.yaml
+
+JMeter can be injected into an Istio service mesh. To deploy in the default
+namespace within the service mesh, use the following command for manual
+sidecar injection::
+
+ $ istioctl kube-inject -f clover-jmeter-master.yaml | kubectl apply -f -
+
+**Note, when injecting JMeter into the service mesh, only the master will function for
+the Clover integration, as master-slave communication is known not to function with the Java
+RMI API. Ensure 'istioctl' is in your path for the above command.**
+
+To deploy slave replicas, render the manifest yaml and create in k8s adjusting the
+``--replica_count`` value for the number of slave pods desired::
+
+ $ python render_slave.py --image_tag=opnfv-7.0.0 --image_path=opnfv --replica_count=3
+ $ kubectl create -f clover-jmeter-slave.yaml
+
+Verifying the deployment
+------------------------
+
+To verify the validation services are deployed, ensure the following pods are present
+with the command below:
+
+.. code-block:: bash
+
+ $ kubectl get pod --all-namespaces
+
+The listing below must include the following pods assuming deployment in the default
+namespace:
+
+.. code-block:: bash
+
+ NAMESPACE NAME READY STATUS
+ default clover-jmeter-master-688677c96f-8nnnr 1/1 Running
+ default clover-jmeter-slave-7f9695d56-8xh67 1/1 Running
+ default clover-jmeter-slave-7f9695d56-fmpz5 1/1 Running
+ default clover-jmeter-slave-7f9695d56-kg76s 1/1 Running
+ default clover-jmeter-slave-7f9695d56-qfgqj 1/1 Running
+
+Using JMeter Validation
+=======================
+
+Creating a test plan
+--------------------
+
+To employ a test plan that can be used against the :ref:`sdc_config_guide` sample, navigate to the
+cloverctl yaml directory and use the sample named 'jmeter_testplan.yaml', which is shown below.
+
+.. code-block:: bash
+
+ load_spec:
+ num_threads: 5
+ loops: 2
+ ramp_time: 60
+ duration: 80
+ url_list:
+ - name: url1
+ url: http://proxy-access-control.default:9180
+ method: GET
+ user-agent: chrome
+ - name: url2
+ url: http://proxy-access-control.default:9180
+ method: GET
+ user-agent: safari
+
+The composition of the yaml file breaks down as follows:
+ * ``load_spec`` section of the yaml defines the load profile of the test.
+ * ``num_threads`` parameter defines the maximum number of clients/users the test will emulate.
+ * ``ramp_time`` determines the rate at which threads/users will be setup.
+ * ``loops`` parameter reruns the same test and can be set to 0 to loop forever.
+ * ``duration`` parameter is used to limit the test run time and is used as a hard cutoff when
+ using loop forever.
+ * ``url_list`` section of the yaml defines a set of HTTP requests that each user will perform.
+ It includes the request URL that is given a name (used as reference in detailed per-request
+ results) and the HTTP method to use (ex. GET, POST). The ``user-agent`` parameter allows this
+ HTTP header to be specified per request and can be used to emulate browsers and devices.
+
+The ``url`` syntax is <domain or IP>:<port #>. The colon port number may be omitted if port 80
+is intended.
+
+The test plan yaml is an abstraction of the JMeter XML syntax (uses .jmx extension) and can be
+pushed to the master using the **cloverctl** CLI with the following command:
+
+.. code-block:: bash
+
+ $ cloverctl create testplan -f jmeter_testplan.yaml
+
+The test plan can now be executed and will automatically be distributed to available JMeter slaves.
+
+Starting the test
+-----------------
+
+Once a test plan has been created on the JMeter master, a test can be started for the test plan
+with the following command:
+
+.. code-block:: bash
+
+ $ cloverctl start testplan
+
+The test will be executed from the **clover-jmeter-master** pod, whereby HTTP requests will
+originate directly from the master. The number of aggregate threads/users and request rates
+can be scaled by increasing the thread count or decreasing the ramp time respectively in the
+test plan yaml. However, the scale of the test can also be controlled by adding slaves to the
+test. When slaves are employed, the master will only be used to control slaves and will not be
+a source of traffic. Each slave pod will execute the test plan in its entirety.
+
+To execute tests using slaves, add the flag '-s' to the start command from the Clover CLI as shown
+below:
+
+.. code-block:: bash
+
+ $ cloverctl start testplan -s <slave count>
+
+The **clover-jmeter-slave** pods must be deployed in advance before executing the above command. If
+the steps outlined in section `Deploy with Clover CLI`_ have been followed, three slaves will
+have already been deployed.
+
+Retrieving Results
+------------------
+
+Results for the test can be obtained by executing the following command:
+
+.. code-block:: bash
+
+ $ cloverctl get testresult
+ $ cloverctl get testresult log
+
+The bottom of the log will display a summary of the test results, as shown below::
+
+ 3 in 00:00:00 = 111.1/s Avg: 7 Min: 6 Max: 8 Err: 0 (0.00%)
+ 20 in 00:00:48 = 0.4/s Avg: 10 Min: 6 Max: 31 Err: 0 (0.00%)
+
+Each row of the summary table is a snapshot in time with the final numbers in the last row.
+In this example, 20 requests (5 users/threads x 2 URLs x 2 loops) were sent successfully
+with no HTTP responses with invalid/error (4xx/5xx) status codes. Longer tests will produce
+a larger number of snapshot rows. Minimum, maximum and average response times are output per
+snapshot.
+
+To obtain detailed, per-request results use the ``detail`` option, as shown below::
+
+ $ cloverctl get testresult detail
+
+ 1541567388622,14,url1,200,OK,ThreadGroup 1-4,text,true,,843,0,1,1,14,0,0
+ 1541567388637,8,url2,200,OK,ThreadGroup 1-4,text,true,,843,0,1,1,8,0,0
+ 1541567388646,6,url1,200,OK,ThreadGroup 1-4,text,true,,843,0,1,1,6,0,0
+ 1541567388653,7,url2,200,OK,ThreadGroup 1-4,text,true,,843,0,1,1,7,0,0
+ 1541567400622,12,url1,200,OK,ThreadGroup 1-5,text,true,,843,0,1,1,12,0,0
+ 1541567400637,8,url2,200,OK,ThreadGroup 1-5,text,true,,843,0,1,1,8,0,0
+ 1541567400645,7,url1,200,OK,ThreadGroup 1-5,text,true,,843,0,1,1,7,0,0
+ 1541567400653,6,url2,200,OK,ThreadGroup 1-5,text,true,,843,0,1,1,6,0,0
+
+Columns are broken down on the following fields:
+ * timeStamp, elapsed, label, responseCode, responseMessage, threadName, dataType, success
+ * failureMessage, bytes, sentBytes, grpThreads, allThreads, Latency, IdleTime, Connect
+
+``elapsed`` or ``Latency`` values are in milliseconds.
+
+Uninstall from Kubernetes environment
+=====================================
+
+Delete with Clover CLI
+-----------------------
+
+When you're finished working with JMeter validation services, you can uninstall it with the
+following command:
+
+.. code-block:: bash
+
+ $ cloverctl delete system validation
+
+The command above will remove the clover-jmeter-master and clover-jmeter-slave deployment
+and service resources from the current k8s context.
+
+Delete from source
+------------------
+
+The JMeter validation services can be uninstalled from the source code using the commands below:
+
+.. code-block:: bash
+
+ $ cd clover/samples/scenarios
+ $ kubectl delete -f clover-jmeter-master.yaml
+ $ kubectl delete -f clover-jmeter-slave.yaml
+
+Uninstall from Docker environment
+=================================
+
+The OPNFV docker images can be removed with the following commands from nodes
+in the k8s cluster.
+
+.. code-block:: bash
+
+ $ docker rmi opnfv/clover-jmeter-master
+ $ docker rmi opnfv/clover-jmeter-slave
+ $ docker rmi opnfv/clover-controller
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index f345f61..9dd15b5 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -4,7 +4,7 @@
.. (c) Authors of Clover
-This document provides Clover project's release notes for the OPNFV Fraser release.
+This document provides Clover project's release notes for the OPNFV Gambia release.
.. contents::
:depth: 3
@@ -18,24 +18,37 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
-| 2018-03-14 | Fraser 1.0 | Stephen Wong | First draft |
+| 2018-03-14 | Gambia 1.0 | Stephen Wong | First draft |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
Important notes
===============
-The Clover project for OPNFV Fraser can ONLY be run on Kubernetes version 1.9 or
-later
+The Clover project for OPNFV Gambia is tested on Kubernetes version 1.9 and
+1.11. It is only tested on Istio 1.0.
Summary
=======
-Clover Fraser release provides tools for installation and validation of various
-upstream cloud native projects including Istio, fluentd, Jaegar, and Prometheus.
-In addition, the Fraser release also includes a sample VNF, its Kubernetes
-manifest, simple tools to validate route rules from Istio, as well as an
-example A-B testing framework.
+Clover Gambia release further enhances the Fraser release by providing various
+tools to help operators deploy cloud native network functions. These tools
+include
+
+#. Collector: gathers and collects metrics and traces from Prometheus and
+ Jaeger, respectively, and provides a single access point for such data
+#. Visibility: utilizes an analytic engine to correlate and organize data
+ gathered by the collector
+#. CLI: comprehensive Clover CLI called cloverctl, offering a single management
+ tool for operating Clover toolset
+#. Network Tracing: CNI plugin agnostic network tracing tool
+#. Extended HTTP Security: integrate modsecurity (Web Application Firewall) and
+ Snort with Istio gateway via Istio newly added mechanisms to redirect and
+ mirror traffic to the network functions
+#. HTTP Test Client: bundle JMeter as test client for testing
+#. UI: developmental / sample UI to offer single pane view of Clover system
+#. Spinnaker Integration: provides automated / programmable cloud provider
+ add/update/delete; sample pipeline and installation scripts
Release Data
============
@@ -47,13 +60,13 @@ Release Data
| **Repo/commit-ID** | |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser |
+| **Release designation** | Gambia |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2018-04-27
+| **Release date** | 2018-11-09
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Fraser release |
+| **Purpose of the delivery** | OPNFV Gambia release |
| | |
+--------------------------------------+--------------------------------------+
@@ -62,18 +75,20 @@ Version change
Module version changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-OPNFV Fraser marks the first release for Clover
+Clover Gambia release will no longer support Istio 0.6, the version of Istio
+supported by the Clover Fraser release.
Document version changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-OPNFV Fraser marks the first release for Clover
+Clover Gambia has updated the config guide and user guide accordingly, including
+new documents for the new features
Reason for version
^^^^^^^^^^^^^^^^^^^^
Feature additions
~~~~~~~~~~~~~~~~~~~~~~~
-<None> (no backlog)
+See Summary above
Bug corrections
~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index 5be100f..d09a9d7 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -3,9 +3,11 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Authors of Clover
-=================================
-OPNFV Clover User Guide
-=================================
+.. _clover_userguide:
+
+=================
+Clover User Guide
+=================
.. toctree::
:maxdepth: 1
diff --git a/docs/release/userguide/userguide.rst b/docs/release/userguide/userguide.rst
index d99359b..942c8f3 100644
--- a/docs/release/userguide/userguide.rst
+++ b/docs/release/userguide/userguide.rst
@@ -5,56 +5,49 @@
================================================================
-Clover User Guide (Fraser Release)
+Clover User Guide (Gambia Release)
================================================================
-This document provides the Clover user guide for the OPNFV Fraser release.
+This document provides the Clover user guide for the OPNFV Gambia release.
Description
===========
-As project Clover's first release, the Fraser release includes installation and simple
-validation of foundational upstream projects including Istio, fluentd, Jaeger, and
-Prometheus. The Clover Fraser release also provides a sample set of web-oriented network
-services, which follow a micro-service design pattern, its Kubernetes manifest, and an
-automated script to demonstrate a sample A-B testing use-case. The A-B sample script
-validates performance criteria using Istio request routing functionality leveraging
-the sample services deployed within Istio and the tracing data available within Jaeger.
+Clover Gambia builds on the previous release to further enhance the toolset for
+cloud native network function operations. The two areas of emphasis in this release are:
-What is in Fraser?
+#. Integration of Spinnaker to support continuous delivery
+#. Centralizing Operational Data for Visibility
+
+What is in Gambia?
==================
* Sample micro-service composed VNF named Service Delivery Controller (SDC)
- * Logging module: fluentd and elasticsearch Kubernetes manifests,
- and fluentd installation validation
+ * Istio 1.0 support
- * Tracing module: Jaeger Kubernetes manifest, installation validation,
- Jaegar tracing query tools, and module for trace data output to datastore
+ * clover-collector: gathers and collects metrics and traces from Prometheus and
+ Jaeger, and provides a single access point for such data
- * Monitoring module: Prometheus Kubernetes manifest, installation
- validation, and sample Prometheous query of Istio related metrics
+ * Visibility: utilizes an analytic engine to correlate and organize data
+ collected by clover-collector
- * Istio route-rules sample yaml and validation tools
+ * cloverctl: Clover's new CLI
- * Test scripts
+ * Clovisor: Clover's cloud native, CNI-plugin agnostic network tracing tool
- * Sample code for an A-B testing demo shown during ONS North America 2018
+ * Integration of HTTP Security Modules with Istio 1.0
-Usage
-=====
+ * JMeter: integrating jmeter as test client
- * Python modules to validate installation of fluentd logging, Jaeger tracing, and
- Prometheus monitoring. Deployment and validation instructions can be found at:
- :ref:`logging`, :ref:`tracing`, and :ref:`monitoring` respectively.
+ * Clover UI: sample UI to offer single pane view / configuration point of the
+ Clover system
- * Deployment and usage of SDC sample
- - Services designed and implemented with micro-service design pattern
- - Tested and validated via Istio service mesh tools
- Detailed usage instructions for the sample can be found at :ref:`sdc_config_guide`
+ * Spinnaker Integration: add ability to add/update/delete cloud provider via
+ cloverctl, and sample pipeline utilized by Clover project to deploy SDC
- * An example use-case for A-B testing. Detailed usage instructions for this sample A-B
- validation can be found at: :ref:`a_b_config_guide`
- * Sample tool to validate Istio route rules:
- tools/python clover_validate_route_rules.py -s <service name> -t <test id>
+Usage
+=====
+
+ * Please refer to the configuration guides for usage details on the various modules
diff --git a/download/cloverctl.tar.gz b/download/cloverctl.tar.gz
new file mode 100644
index 0000000..0ec589a
--- /dev/null
+++ b/download/cloverctl.tar.gz
Binary files differ