path: root/ansible/prepare_env.yml
# Copyright (c) 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
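# Prepare the Yardstick environment: verify that either an openrc file exists
# or a supported INSTALLER_TYPE is set, convert the openrc into a clouds.yaml,
# and generate the StorPerf admin-rc file.
#
# Example invocation (hypothetical inventory and variable values):
#   ansible-playbook -i inventory.ini prepare_env.yml \
#     -e OPENRC=/etc/yardstick/openrc -e INSTALLER_TYPE=fuel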
- hosts: yardstick
  vars_files:
    - yardstick_config.yml

  pre_tasks:
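    # probe for the openrc file; a missing file is tolerated here so the
    # installer-type check below can decide whether to abort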
    - name: check for openrc
      stat: path="{{ OPENRC }}"
      ignore_errors: yes
      register: openrc_stat

    - set_fact:
        openrc_present: "{{ openrc_stat.stat.isreg|default(False) }}"

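    # without an openrc, credentials must come from a supported installer,
    # so abort the play when INSTALLER_TYPE is not one of INSTALLERS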
    - fail: msg="{{ INSTALLER_TYPE }} not in {{ INSTALLERS }}"
      when: not openrc_present and (INSTALLER_TYPE not in INSTALLERS)


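  # convert_openrc renders {{ OPENRC }} as {{ opnfv_root }}/clouds.yaml; the
  # fuel jumphost is only registered when INSTALLER_TYPE is 'fuel'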
  roles:
    - role: convert_openrc
      openrc_file: "{{ OPENRC }}"
      cloud_yaml_path: "{{ opnfv_root }}/clouds.yaml"
    - role: create_storperf_admin_rc
    - role: add_fuel_jumphost
      when: "INSTALLER_TYPE == 'fuel'"
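
  # For reference, a clouds.yaml in the os-client-config format looks roughly
  # like this sketch (illustrative values, not the role's verbatim output):
  #   clouds:
  #     opnfv:
  #       auth:
  #         auth_url: http://192.0.2.10:5000/v3
  #         username: admin
  #         password: secret
  #         project_name: admin
  #       region_name: RegionOne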


  post_tasks:
    # if the node is huawei-pod1, register it so we can copy the
    # storperf_admin-rc file to the deployment location on the pod
    - name: add huawei-pod1 so we can copy storperf_admin-rc to the pod
      add_host:
        name: huawei-pod1
        ansible_ssh_host: "192.168.200.1"
        ansible_ssh_pass: root
        ansible_ssh_user: root
      when: "NODE_NAME == 'huawei-pod1'"


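# Second play: on the dynamically added huawei-pod1 host, drop the generated
# storperf admin-rc file into /root/ for StorPerf integration.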
- hosts: huawei-pod1
  tasks:
    - name: copy an admin-rc file for StorPerf integration to the deployment location
      copy:
        src: "{{ storperf_rc }}"
        dest: /root/


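# Third play: on the fuel jumphost registered by add_fuel_jumphost, apply the
# create_fuel_pod_yaml role to generate the pod description file.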
- hosts: fuel_jumphost
  roles:
    - create_fuel_pod_yaml
# --- Tail of an unrelated Python fragment (Django/Celery booking-cleanup
# tasks). The start of cleanup_network() and the definition of
# cleanup_hardware() are cut off in the source; module imports are elided.

            # tail of cleanup_network(qs): release any VLANs held by the
            # interface, reset its config, and re-queue the network job
            if len(vlans) > 0:
                host.lab.vlan_manager.release_vlans(vlans)
            interface.config.clear()
            network.add_interface(interface)
            network.save()
        hostrelation.status = JobStatus.NEW
        hostrelation.save()


def cleanup_software(qs):
    if qs.exists():
        relation = qs.first()
        software = relation.config.opnfv
        software.clear_delta()
        software.save()
        relation.status = JobStatus.NEW
        relation.save()


def cleanup_access(qs):
    for relation in qs:
        if "vpn" in relation.config.access_type.lower():
            relation.config.set_revoke(True)
            relation.config.save()
            relation.status = JobStatus.NEW
            relation.save()


# expire bookings whose end time has passed but whose job is not yet complete
cleanup_set = Booking.objects.filter(end__lte=timezone.now()).filter(job__complete=False)

for booking in cleanup_set:
    if not booking.job.complete:
        job = booking.job
        cleanup_software(SoftwareRelation.objects.filter(job=job))
        cleanup_hardware(HostHardwareRelation.objects.filter(job=job))
        cleanup_network(HostNetworkRelation.objects.filter(job=job))
        cleanup_access(AccessRelation.objects.filter(job=job))
        job.complete = True
        job.save()
        NotificationHandler.notify_booking_end(booking)


@shared_task
def free_hosts():
    """
    gets all hosts from the database that need to be freed and frees them
    """
    undone_statuses = [JobStatus.NEW, JobStatus.CURRENT, JobStatus.ERROR]
    undone_jobs = Job.objects.filter(
        hostnetworkrelation__status__in=undone_statuses,
        hosthardwarerelation__status__in=undone_statuses
    )

    bookings = Booking.objects.exclude(
        job__in=undone_jobs
    ).filter(
        end__lt=timezone.now(),
        job__complete=True,
        resource__isnull=False
    )
    for booking in bookings:
        ResourceManager.getInstance().deleteResourceBundle(booking.resource)