summaryrefslogtreecommitdiffstats
path: root/build/f_isoroot/f_repobuild/Makefile
blob: 891712b9ac7f18b8c6213eb470813ac7307333f0 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
##############################################################################
# Copyright (c) 2015,2016 Ericsson AB, Enea AB and others.
# stefan.k.berg@ericsson.com
# jonas.bjurel@ericsson.com
# Alexandru.Avadanii@enea.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Recipes rely on bash features (brace expansion in the .cacheid rule),
# so pin the shell explicitly.
SHELL := /bin/bash
# Absolute path of this directory. $(CURDIR) is maintained by Make itself,
# unlike $(shell pwd) which forks a shell on every parse.
TOP := $(CURDIR)

include ../../config.mk
include config.mk

# Propagate the MOS version and the mirror output path to child processes
# (the packetary driver script reads MIRROR_UBUNTU_OPNFV_PATH).
export MOS_VERSION
export MIRROR_UBUNTU_OPNFV_PATH:=$(TOP)/nailgun/mirrors/ubuntu

.PHONY: all
all: nailgun

# Build the local Ubuntu package mirror under ./nailgun using packetary.
# The "nailgun" directory itself serves as the target, so a completed build
# is not needlessly repeated.  Requires sudo for apt-get and pip installs.
nailgun:
	sudo apt-get install -y createrepo git libxml2-dev libxslt1-dev \
		python-dev zlib1g-dev
	rm -Rf nailgun packetary opnfv_config && mkdir opnfv_config
	# We will analyze fuel-web's fixture files for package lists
	ln -sf ${F_SUBMOD_DIR}/fuel-web fuel-web
	git clone --quiet $(PACKETARY_REPO)
	# Quotes are required here: an unquoted empty $(PACKETARY_COMMIT) would
	# reduce the test to "[ -n ]", which is always true, running a bogus
	# "git checkout" with no argument instead of skipping the pin.
	if [ -n "$(PACKETARY_COMMIT)" ]; then \
		git -C packetary checkout $(PACKETARY_COMMIT); \
	fi
	sudo pip install -U -r ./packetary/requirements.txt
	sudo pip install -U ./packetary
	# Handle config and mirror build in one place
	./opnfv_mirror_ubuntu.py
	# Store artifact in cache straight away if caching is enabled
	# (no .cacheid will be present unless this is a cached build)
	test -f .cacheid && $(MAKE) -f Makefile put-cache || exit 0

# Remove everything this Makefile may have created, including the
# previously released copy under ../release/opnfv.
.PHONY: clean
clean:
	@rm -rf ../release/opnfv/nailgun
	@rm -rf nailgun packetary fuel-web opnfv_config

# Staging area that the parent build collects release artifacts from.
RELEASE_OPNFV := ../release/opnfv

# Copy the freshly built mirror and the bootstrap CLI config into the
# release staging area, replacing any previous copy.
.PHONY: release
release: nailgun
	@rm -Rf $(RELEASE_OPNFV)/nailgun
	@mkdir -p $(RELEASE_OPNFV)
	@cp -Rp nailgun $(RELEASE_OPNFV)/nailgun
	@cp fuel_bootstrap_cli.yaml $(RELEASE_OPNFV)/

############################################################################
# Cache operations - only used when building through ci/build.sh
############################################################################

# Compute the cache key for this component.  The key changes whenever any
# of the following change:
#   - the ISO year/week (forces a weekly cache rebuild)
#   - this Makefile or any sh/mk/py/yaml file in the current directory
#   - the repo package fingerprint reported by $(CACHETOOL)
#   - the target architecture list
.cacheid:
	date +"Repocache %G%V" > .cachedata
	sha1sum Makefile *.{sh,mk,py,yaml} >> .cachedata
	$(CACHETOOL) packages >> .cachedata
	echo -n $(UBUNTU_ARCH) | sha1sum | awk '{print $$1}' >> .cachedata
	$(CACHETOOL) getid < .cachedata > .cacheid

# Drop locally cached key material - called prior to an ordinary build.
.PHONY: clean-cache
clean-cache: clean
	$(RM) .cachedata .cacheid

# Fetch and unpack a cached artifact, if one exists for our cache key.
# A cache miss is not an error - the build simply proceeds from scratch.
# The id is read once into a shell variable instead of re-running
# $(shell cat .cacheid) for every reference.
.PHONY: get-cache
get-cache: .cacheid
	@id=$$(cat .cacheid); \
	if $(CACHETOOL) check $$id; then \
		$(CACHETOOL) get $$id | tar xf -; \
	else \
		echo "No cache item found for $$id"; \
	fi

# Upload the build result under the computed cache key - called after an
# ordinary build.  The key is resolved by the shell at run time rather
# than by Make's $(shell) at recipe-expansion time.
.PHONY: put-cache
put-cache: .cacheid
	@tar cf - nailgun | $(CACHETOOL) put $$(cat .cacheid)
nt_install_sh="/home/opnfv/bottlenecks/monitor/dispatch/install_barometer_client.sh" barometer_client_install_conf="/home/opnfv/bottlenecks/monitor/config/barometer_client.conf" cadvisor_client_install_sh="/home/opnfv/bottlenecks/monitor/dispatch/install_cadvisor_client.sh" collectd_client_install_sh="/home/opnfv/bottlenecks/monitor/dispatch/install_collectd_client.sh" collectd_client_install_conf="/home/opnfv/bottlenecks/monitor/config/collectd_client.conf" # INSTALL GRAFANA + PROMETHEUS + CADVISOR + BAROMETER on the JUMPERSERVER # # Node-Exporter echo == installation of monitoring module is started == # # Collectd # # Configure IP Address in collectd server configuration # python ${DISPATCH}/server_ip_configure.py ${MONITOR_CONFIG}/collectd_server.conf # sudo docker run --name bottlenecks-collectd -d \ # --privileged \ # -v ${MONITOR_CONFIG}/collectd_server.conf:/etc/collectd/collectd.conf:ro \ # -v /proc:/mnt/proc:ro \ # fr3nd/collectd:5.5.0-1 set +e # Prometheus sudo docker run --name bottlenecks-prometheus \ -d -p 9090:9090 \ -v ${MONITOR_CONFIG}/prometheus.yaml:/etc/prometheus/prometheus.yml \ prom/prometheus:v1.7.1 # Collectd-Exporter sudo docker run --name bottlenecks-collectd-exporter \ -d -p 9103:9103 -p 25826:25826/udp \ prom/collectd-exporter:0.3.1 \ -collectd.listen-address=":25826" sudo docker run --name bottlenecks-node-exporter \ -d -p 9100:9100 \ -v "/proc:/host/proc:ro" \ -v "/sys:/host/sys:ro" \ -v "/:/rootfs:ro" \ --net="host" \ quay.io/prometheus/node-exporter:v0.14.0 \ -collector.procfs /host/proc \ -collector.sysfs /host/sys \ -collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc)($|/)" # Openstack-Exporter sudo docker run --name bottlenecks-openstack-exporter \ -v /tmp:/tmp \ -p 9104:9104 --env-file ${OPENSTACK_ENV} \ -d gabrielyuyang/openstack-exporter:1.0 # Grafana sudo docker run --name bottlenecks-grafana \ -d -p 3000:3000 \ -v ${MONITOR_CONFIG}/grafana.ini:/etc/grafana/grafana.ini \ grafana/grafana:4.5.0 # Automate 
Prometheus Datasource and Grafana Dashboard creation set -e sleep 5 python ${DISPATCH}/../dashboard/automated_dashboard_datasource.py set +e # Cadvisor sudo docker run \ --volume=/:/rootfs:ro \ --volume=/var/run:/var/run:rw \ --volume=/sys:/sys:ro \ --volume=/var/lib/docker/:/var/lib/docker:ro \ --volume=/dev/disk/:/dev/disk:ro \ --publish=8080:8080 \ --detach=true \ --name=bottlenecks-cadvisor \ google/cadvisor:v0.25.0 set -e # Barometer # Configure IP Address in barometer server configuration sleep 5 python ${DISPATCH}/server_ip_configure.py ${MONITOR_CONFIG}/barometer_server.conf set +e # Install on jumpserver docker pull opnfv/barometer sudo docker run --name bottlenecks-barometer -d --net=host \ -v ${MONITOR_CONFIG}/barometer_server.conf:/src/barometer/src/collectd/collectd/src/collectd.conf \ -v ${MONITOR_CONFIG}/barometer_server.conf:/opt/collectd/etc/collectd.conf \ -v /var/run:/var/run \ -v /tmp:/tmp \ --privileged opnfv/barometer /run_collectd.sh set -e # INSTALL BAROMETER + CADVISOR (+ COLLECTD) CLIENTS on COMPUTE/CONTROL NODES # Configure IP Address in barometer client configuration python ${DISPATCH}/client_ip_configure.py ${MONITOR_CONFIG}/barometer_client.conf # Automate Barometer client installation python ${DISPATCH}/install_clients.py \ -i ${INSTALLER_TYPE} -s ${barometer_client_install_sh} \ -c ${barometer_client_install_conf} # # Configure IP Address in collectd client configuration # python ${DISPATCH}/client_ip_configure.py ${MONITOR_CONFIG}/collectd_client.conf # # Automate Collectd Client installation # python ${DISPATCH}/automate_collectd_client.py # Automate Cadvisor Client python ${DISPATCH}/install_clients.py \ -i ${INSTALLER_TYPE} -s ${cadvisor_client_install_sh} echo == installation of monitoring module is finished ==